diff --git a/src/gt4py/next/common.py b/src/gt4py/next/common.py
index b18da8e9a7..4aa0dd03aa 100644
--- a/src/gt4py/next/common.py
+++ b/src/gt4py/next/common.py
@@ -70,6 +70,20 @@ def __str__(self) -> str:
         return self.value
 
 
+def dimension_to_implicit_offset(dim: str) -> str:
+    """
+    Return the name of the offset implicitly defined by a dimension.
+
+    Each dimension also implicitly defines an offset, such that we can allow syntax like::
+
+        field(TDim + 1)
+
+    without having to explicitly define an offset for ``TDim``. This function defines the
+    respective naming convention.
+    """
+    return f"_{dim}Off"
+
+
 @dataclasses.dataclass(frozen=True)
 class Dimension:
     value: str
@@ -81,6 +95,18 @@ def __str__(self) -> str:
     def __call__(self, val: int) -> NamedIndex:
         return NamedIndex(self, val)
 
+    def __add__(self, offset: int) -> ConnectivityField:
+        # TODO(sf-n): just here to avoid a circular import. Move or refactor `FieldOffset` to avoid this.
+        from gt4py.next.ffront import fbuiltins
+
+        assert isinstance(self.value, str)
+        return fbuiltins.FieldOffset(
+            dimension_to_implicit_offset(self.value), source=self, target=(self,)
+        )[offset]
+
+    def __sub__(self, offset: int) -> ConnectivityField:
+        return self + (-offset)
+
 
 class Infinity(enum.Enum):
     """Describes an unbounded `UnitRange`."""
@@ -672,7 +698,11 @@ def as_scalar(self) -> core_defs.ScalarT: ...
 
     # Operators
     @abc.abstractmethod
-    def __call__(self, index_field: ConnectivityField | fbuiltins.FieldOffset) -> Field: ...
+    def __call__(
+        self,
+        index_field: ConnectivityField | fbuiltins.FieldOffset,
+        *args: ConnectivityField | fbuiltins.FieldOffset,
+    ) -> Field: ...
 
     @abc.abstractmethod
     def __getitem__(self, item: AnyIndexSpec) -> Self: ...
@@ -992,7 +1022,11 @@ def inverse_image(self, image_range: UnitRange | NamedRange) -> Sequence[NamedRa
         assert isinstance(image_range, UnitRange)
         return (named_range((self.domain_dim, image_range - self.offset)),)
 
-    def premap(self, index_field: ConnectivityField | fbuiltins.FieldOffset) -> ConnectivityField:
+    def premap(
+        self,
+        index_field: ConnectivityField | fbuiltins.FieldOffset,
+        *args: ConnectivityField | fbuiltins.FieldOffset,
+    ) -> ConnectivityField:
         raise NotImplementedError()
 
     __call__ = premap
diff --git a/src/gt4py/next/embedded/nd_array_field.py b/src/gt4py/next/embedded/nd_array_field.py
index 736058b175..655a1137e8 100644
--- a/src/gt4py/next/embedded/nd_array_field.py
+++ b/src/gt4py/next/embedded/nd_array_field.py
@@ -315,7 +315,16 @@ def premap(
         assert len(conn_fields) == 1
         return _remapping_premap(self, conn_fields[0])
 
-    __call__ = premap  # type: ignore[assignment]
+    def __call__(
+        self,
+        index_field: common.ConnectivityField | fbuiltins.FieldOffset,
+        *args: common.ConnectivityField | fbuiltins.FieldOffset,
+    ) -> common.Field:
+        return functools.reduce(
+            lambda field, current_index_field: field.premap(current_index_field),
+            [index_field, *args],
+            self,
+        )
 
     def restrict(self, index: common.AnyIndexSpec) -> NdArrayField:
         new_domain, buffer_slice = self._slice(index)
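The operators added above compose as follows: `Dimension.__add__`/`__sub__` only build a subscripted `FieldOffset` that follows the implicit naming convention, and the variadic `__call__` of `NdArrayField` folds `premap` over all given offsets. A rough equivalence sketch (the names `IDim` and `field` are illustrative, not part of this diff)::

    IDim = Dimension("I")

    # `IDim + 1` evaluates (roughly) to
    FieldOffset(dimension_to_implicit_offset("I"), source=IDim, target=(IDim,))[1]
    # i.e. FieldOffset("_IOff", ...)[1]; `IDim - 1` simply negates the index.

    # With the variadic __call__, chaining offsets
    field(IDim + 1, IDim - 1)
    # behaves like
    field.premap(IDim + 1).premap(IDim - 1)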
diff --git a/src/gt4py/next/ffront/decorator.py b/src/gt4py/next/ffront/decorator.py
index a4239c1131..c9146d44df 100644
--- a/src/gt4py/next/ffront/decorator.py
+++ b/src/gt4py/next/ffront/decorator.py
@@ -26,6 +26,7 @@
 from gt4py.next import (
     allocators as next_allocators,
     backend as next_backend,
+    common,
     embedded as next_embedded,
     errors,
 )
@@ -185,7 +186,35 @@ def itir(self) -> itir.FencilDefinition:
             return self.backend.transforms_prog.past_to_itir(no_args_past).program
         return past_to_itir.PastToItirFactory()(no_args_past).program
 
+    @functools.cached_property
+    def _implicit_offset_provider(self) -> dict[common.Tag, common.OffsetProviderElem]:
+        """
+        Return all implicit offset providers.
+
+        Each dimension implicitly defines an offset provider such that we can allow syntax like::
+
+            field(TDim + 1)
+
+        This property collects these implicit offset providers.
+        """
+        # TODO(tehrengruber): We add all dimensions here regardless of whether they are cartesian
+        # or unstructured. While it is conceptually fine (though somewhat meaningless) to do
+        # something like `Cell + 1`, the GTFN backend for example doesn't support it. We should
+        # find a way to avoid adding these dimensions, but since we don't have the grid type here
+        # and since the dimensions don't carry this information either, we just add all dimensions
+        # here and filter them out in the backends that don't support this.
+        implicit_offset_provider = {}
+        params = self.past_stage.past_node.params
+        for param in params:
+            if isinstance(param.type, ts.FieldType):
+                for dim in param.type.dims:
+                    implicit_offset_provider.update(
+                        {common.dimension_to_implicit_offset(dim.value): dim}
+                    )
+        return implicit_offset_provider
+
     def __call__(self, *args, offset_provider: dict[str, Dimension], **kwargs: Any) -> None:
+        offset_provider = offset_provider | self._implicit_offset_provider
         if self.backend is None:
             warnings.warn(
                 UserWarning(
diff --git a/src/gt4py/next/ffront/foast_passes/type_deduction.py b/src/gt4py/next/ffront/foast_passes/type_deduction.py
index e12534da92..d334487ae1 100644
--- a/src/gt4py/next/ffront/foast_passes/type_deduction.py
+++ b/src/gt4py/next/ffront/foast_passes/type_deduction.py
@@ -589,6 +589,18 @@ def _deduce_compare_type(
     def _deduce_binop_type(
         self, node: foast.BinOp, *, left: foast.Expr, right: foast.Expr, **kwargs: Any
     ) -> Optional[ts.TypeSpec]:
+        # e.g. `IDim + 1`
+        if (
+            isinstance(left.type, ts.DimensionType)
+            and isinstance(right.type, ts.ScalarType)
+            and type_info.is_integral(right.type)
+        ):
+            return ts.OffsetType(source=left.type.dim, target=(left.type.dim,))
+        if isinstance(left.type, ts.OffsetType):
+            raise errors.DSLError(
+                node.location, f"Type '{left.type}' can not be used in operator '{node.op}'."
+            )
+
         logical_ops = {
             dialect_ast_enums.BinaryOperator.BIT_AND,
             dialect_ast_enums.BinaryOperator.BIT_OR,
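To make `_implicit_offset_provider` concrete: for a program whose parameters include a field defined on `(IDim, KDim)`, the property evaluates to a mapping built with `dimension_to_implicit_offset`, which `__call__` then merges into the user-supplied `offset_provider` (a hypothetical example, assuming `IDim = Dimension("I")` and `KDim = Dimension("K")`)::

    prog._implicit_offset_provider        # -> {"_IOff": IDim, "_KOff": KDim}

    prog(inp, out, offset_provider={"Koff": KDim})
    # effective offset provider seen by the backend:
    # {"Koff": KDim, "_IOff": IDim, "_KOff": KDim}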
diff --git a/src/gt4py/next/ffront/foast_to_gtir.py b/src/gt4py/next/ffront/foast_to_gtir.py
index 31168159dc..9c9af78d50 100644
--- a/src/gt4py/next/ffront/foast_to_gtir.py
+++ b/src/gt4py/next/ffront/foast_to_gtir.py
@@ -13,6 +13,7 @@
 from gt4py import eve
 from gt4py.eve import utils as eve_utils
 from gt4py.eve.extended_typing import Never
+from gt4py.next import common
 from gt4py.next.ffront import (
     dialect_ast_enums,
     experimental as experimental_builtins,
@@ -219,25 +220,62 @@ def visit_Compare(self, node: foast.Compare, **kwargs: Any) -> itir.FunCall:
         return self._map(node.op.value, node.left, node.right)
 
     def _visit_shift(self, node: foast.Call, **kwargs: Any) -> itir.Expr:
-        match node.args[0]:
-            case foast.Subscript(value=foast.Name(id=offset_name), index=int(offset_index)):
-                shift_offset = im.shift(offset_name, offset_index)
-                return im.as_fieldop(im.lambda_("__it")(im.deref(shift_offset("__it"))))(
-                    self.visit(node.func, **kwargs)
-                )
-            case foast.Name(id=offset_name):
-                return im.as_fieldop_neighbors(str(offset_name), self.visit(node.func, **kwargs))
-            case foast.Call(func=foast.Name(id="as_offset")):
-                # TODO(havogt): discuss this representation
-                func_args = node.args[0]
-                offset_dim = func_args.args[0]
-                assert isinstance(offset_dim, foast.Name)
-                shift_offset = im.shift(offset_dim.id, im.deref("__offset"))
-                return im.as_fieldop(
-                    im.lambda_("__it", "__offset")(im.deref(shift_offset("__it")))
-                )(self.visit(node.func, **kwargs), self.visit(func_args.args[1], **kwargs))
-            case _:
-                raise FieldOperatorLoweringError("Unexpected shift arguments!")
+        current_expr = self.visit(node.func, **kwargs)
+
+        for arg in node.args:
+            match arg:
+                # `field(Off[idx])`
+                case foast.Subscript(value=foast.Name(id=offset_name), index=int(offset_index)):
+                    current_expr = im.as_fieldop(
+                        im.lambda_("__it")(im.deref(im.shift(offset_name, offset_index)("__it")))
+                    )(current_expr)
+                # `field(Dim + idx)`
+                case foast.BinOp(
+                    op=dialect_ast_enums.BinaryOperator.ADD
+                    | dialect_ast_enums.BinaryOperator.SUB,
+                    left=foast.Name(id=dimension),  # TODO(tehrengruber): use type of lhs
+                    right=foast.Constant(value=offset_index),
+                ):
+                    if arg.op == dialect_ast_enums.BinaryOperator.SUB:
+                        offset_index *= -1
+                    current_expr = im.as_fieldop(
+                        # TODO(SF-N): we rely on the naming convention that the cartesian dimensions
+                        # are passed suffixed with `off`, e.g. `K` is passed as `Koff` in the
+                        # offset provider. This is a rather unclean solution and should be
+                        # improved.
+                        im.lambda_("__it")(
+                            im.deref(
+                                im.shift(
+                                    common.dimension_to_implicit_offset(dimension), offset_index
+                                )("__it")
+                            )
+                        )
+                    )(current_expr)
+                # `field(Off)`
+                case foast.Name(id=offset_name):
+                    # only a single unstructured shift is supported, so returning here is fine even though we
+                    # are in a loop.
+                    assert len(node.args) == 1 and len(arg.type.target) > 1  # type: ignore[attr-defined] # ensured by pattern
+                    return im.as_fieldop_neighbors(
+                        str(offset_name), self.visit(node.func, **kwargs)
+                    )
+                # `field(as_offset(Off, offset_field))`
+                case foast.Call(func=foast.Name(id="as_offset")):
+                    func_args = arg
+                    # TODO(tehrengruber): Discuss representation. We could use the type system to
+                    # deduce the offset dimension instead of the name (e.g. to allow aliasing).
+                    offset_dim = func_args.args[0]
+                    assert isinstance(offset_dim, foast.Name)
+                    offset_field = self.visit(func_args.args[1], **kwargs)
+                    current_expr = im.as_fieldop(
+                        im.lambda_("__it", "__offset")(
+                            im.deref(im.shift(offset_dim.id, im.deref("__offset"))("__it"))
+                        )
+                    )(current_expr, offset_field)
+                case _:
+                    raise FieldOperatorLoweringError("Unexpected shift arguments!")
+
+        return current_expr
 
     def visit_Call(self, node: foast.Call, **kwargs: Any) -> itir.Expr:
         if type_info.type_class(node.func.type) is ts.FieldType:
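The unit test added at the end of this diff shows the shape of the lowered expression for a single Cartesian shift; schematically, `return inp(TDim + 1)` becomes (a sketch using the `im` builders, mirroring that test)::

    im.as_fieldop(
        im.lambda_("__it")(
            im.deref(im.shift(common.dimension_to_implicit_offset(TDim.value), 1)("__it"))
        )
    )("inp")

Because `current_expr` is rewrapped on every loop iteration, a call with several offsets, e.g. `inp(IDim + 1, JDim - 1)`, simply nests one such `as_fieldop` per argument.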
diff --git a/src/gt4py/next/ffront/foast_to_itir.py b/src/gt4py/next/ffront/foast_to_itir.py
index dbcd6c8d47..64b7c66161 100644
--- a/src/gt4py/next/ffront/foast_to_itir.py
+++ b/src/gt4py/next/ffront/foast_to_itir.py
@@ -14,6 +14,7 @@
 from gt4py.eve import NodeTranslator, PreserveLocationVisitor
 from gt4py.eve.extended_typing import Never
 from gt4py.eve.utils import UIDGenerator
+from gt4py.next import common
 from gt4py.next.ffront import (
     dialect_ast_enums,
     fbuiltins,
@@ -276,23 +277,60 @@ def visit_Compare(self, node: foast.Compare, **kwargs: Any) -> itir.FunCall:
         return self._map(node.op.value, node.left, node.right)
 
     def _visit_shift(self, node: foast.Call, **kwargs: Any) -> itir.Expr:
-        match node.args[0]:
-            case foast.Subscript(value=foast.Name(id=offset_name), index=int(offset_index)):
-                shift_offset = im.shift(offset_name, offset_index)
-            case foast.Name(id=offset_name):
-                return im.lifted_neighbors(str(offset_name), self.visit(node.func, **kwargs))
-            case foast.Call(func=foast.Name(id="as_offset")):
-                func_args = node.args[0]
-                offset_dim = func_args.args[0]
-                assert isinstance(offset_dim, foast.Name)
-                shift_offset = im.shift(
-                    offset_dim.id, im.deref(self.visit(func_args.args[1], **kwargs))
-                )
-            case _:
-                raise FieldOperatorLoweringError("Unexpected shift arguments!")
-        return im.lift(im.lambda_("it")(im.deref(shift_offset("it"))))(
-            self.visit(node.func, **kwargs)
-        )
+        current_expr = self.visit(node.func, **kwargs)
+
+        for arg in node.args:
+            match arg:
+                # `field(Off[idx])`
+                case foast.Subscript(value=foast.Name(id=offset_name), index=int(offset_index)):
+                    current_expr = im.lift(
+                        im.lambda_("it")(im.deref(im.shift(offset_name, offset_index)("it")))
+                    )(current_expr)
+                # `field(Dim + idx)`
+                case foast.BinOp(
+                    op=dialect_ast_enums.BinaryOperator.ADD
+                    | dialect_ast_enums.BinaryOperator.SUB,
+                    left=foast.Name(id=dimension),
+                    right=foast.Constant(value=offset_index),
+                ):
+                    if arg.op == dialect_ast_enums.BinaryOperator.SUB:
+                        offset_index *= -1
+                    current_expr = im.lift(
+                        # TODO(SF-N): we rely on the naming convention that the cartesian dimensions
+                        # are passed suffixed with `off`, e.g. `K` is passed as `Koff` in the
+                        # offset provider. This is a rather unclean solution and should be
+                        # improved.
+                        im.lambda_("it")(
+                            im.deref(
+                                im.shift(
+                                    common.dimension_to_implicit_offset(dimension), offset_index
+                                )("it")
+                            )
+                        )
+                    )(current_expr)
+                # `field(Off)`
+                case foast.Name(id=offset_name):
+                    # only a single unstructured shift is supported, so returning here is fine even though we
+                    # are in a loop.
+                    assert len(node.args) == 1 and len(arg.type.target) > 1  # type: ignore[attr-defined] # ensured by pattern
+                    return im.lifted_neighbors(str(offset_name), self.visit(node.func, **kwargs))
+                # `field(as_offset(Off, offset_field))`
+                case foast.Call(func=foast.Name(id="as_offset")):
+                    func_args = arg
+                    # TODO(tehrengruber): Use the type system to deduce the offset dimension instead
+                    # of the name (e.g. to allow aliasing).
+                    offset_dim = func_args.args[0]
+                    assert isinstance(offset_dim, foast.Name)
+                    offset_it = self.visit(func_args.args[1], **kwargs)
+                    current_expr = im.lift(
+                        im.lambda_("it", "offset")(
+                            im.deref(im.shift(offset_dim.id, im.deref("offset"))("it"))
+                        )
+                    )(current_expr, offset_it)
+                case _:
+                    raise FieldOperatorLoweringError("Unexpected shift arguments!")
+
+        return current_expr
 
     def visit_Call(self, node: foast.Call, **kwargs: Any) -> itir.Expr:
         if type_info.type_class(node.func.type) is ts.FieldType:
diff --git a/src/gt4py/next/ffront/past_to_itir.py b/src/gt4py/next/ffront/past_to_itir.py
index 61a96694ee..c8defcfe05 100644
--- a/src/gt4py/next/ffront/past_to_itir.py
+++ b/src/gt4py/next/ffront/past_to_itir.py
@@ -15,7 +15,7 @@
 import factory
 
 from gt4py.eve import NodeTranslator, concepts, traits
-from gt4py.next import common, config
+from gt4py.next import common, config, errors
 from gt4py.next.ffront import (
     fbuiltins,
     gtcallable,
@@ -435,9 +435,10 @@ def _compute_field_slice(node: past.Subscript) -> list[past.Slice]:
     node_dims = cast(ts.FieldType, node.type).dims
     assert isinstance(node_dims, list)
     if isinstance(node.type, ts.FieldType) and len(out_field_slice_) != len(node_dims):
-        raise ValueError(
+        raise errors.DSLError(
+            node.location,
             f"Too many indices for field '{out_field_name}': field is {len(node_dims)}"
-            f"-dimensional, but {len(out_field_slice_)} were indexed."
+            f"-dimensional, but {len(out_field_slice_)} were indexed.",
         )
     return out_field_slice_
 
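The lift-based lowering follows the same pattern, so chained offsets nest one `lift` per argument; e.g. `field(IDim + 1, JDim - 1)` lowers to roughly the following (a sketch; the `"_IOff"`/`"_JOff"` names assume `IDim.value == "I"` and `JDim.value == "J"`)::

    im.lift(im.lambda_("it")(im.deref(im.shift("_JOff", -1)("it"))))(
        im.lift(im.lambda_("it")(im.deref(im.shift("_IOff", 1)("it"))))("field")
    )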
diff --git a/src/gt4py/next/iterator/embedded.py b/src/gt4py/next/iterator/embedded.py
index e72e12779a..f40937565e 100644
--- a/src/gt4py/next/iterator/embedded.py
+++ b/src/gt4py/next/iterator/embedded.py
@@ -1153,7 +1153,11 @@ def as_scalar(self) -> core_defs.IntegralScalar:
         assert self._cur_index is not None
         return self._cur_index
 
-    def premap(self, index_field: common.ConnectivityField | fbuiltins.FieldOffset) -> common.Field:
+    def premap(
+        self,
+        index_field: common.ConnectivityField | fbuiltins.FieldOffset,
+        *args: common.ConnectivityField | fbuiltins.FieldOffset,
+    ) -> common.Field:
         # TODO can be implemented by constructing an ndarray (but do we know of which kind?)
         raise NotImplementedError()
 
@@ -1273,7 +1277,11 @@ def ndarray(self) -> core_defs.NDArrayObject:
     def asnumpy(self) -> np.ndarray:
         raise NotImplementedError()
 
-    def premap(self, index_field: common.ConnectivityField | fbuiltins.FieldOffset) -> common.Field:
+    def premap(
+        self,
+        index_field: common.ConnectivityField | fbuiltins.FieldOffset,
+        *args: common.ConnectivityField | fbuiltins.FieldOffset,
+    ) -> common.Field:
         # TODO can be implemented by constructing an ndarray (but do we know of which kind?)
         raise NotImplementedError()
 
diff --git a/src/gt4py/next/program_processors/codegens/gtfn/itir_to_gtfn_ir.py b/src/gt4py/next/program_processors/codegens/gtfn/itir_to_gtfn_ir.py
index 6eded7433a..67b2c5af67 100644
--- a/src/gt4py/next/program_processors/codegens/gtfn/itir_to_gtfn_ir.py
+++ b/src/gt4py/next/program_processors/codegens/gtfn/itir_to_gtfn_ir.py
@@ -154,6 +154,17 @@ def _collect_offset_definitions(
             )
         else:
             assert grid_type == common.GridType.UNSTRUCTURED
+            # TODO(tehrengruber): The implicit offset providers added to support syntax like
+            # `KDim+1` can also include horizontal dimensions. Cartesian shifts in these
+            # dimensions are not supported by the backend and also never occur in user code.
+            # We just skip them here for now, but this is not a clean solution. Not having
+            # any unstructured dimensions in here would be preferred.
+            if (
+                dim.kind == common.DimensionKind.HORIZONTAL
+                and offset_name == common.dimension_to_implicit_offset(dim.value)
+            ):
+                continue
+
             if not dim.kind == common.DimensionKind.VERTICAL:
                 raise ValueError(
                     "Mapping an offset to a horizontal dimension in unstructured is not allowed."
diff --git a/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py b/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py
index 2df59140ee..300fc5bdaa 100644
--- a/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py
+++ b/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py
@@ -345,7 +345,9 @@ def __sdfg__(self, *args, **kwargs) -> dace.sdfg.sdfg.SDFG:
             raise ValueError(
                 "[DaCe Orchestration] Connectivities -at compile time- are required to generate the SDFG. Use `with_connectivities` method."
             )
-        offset_provider = self.connectivities  # tables are None at this point
+        offset_provider = (
+            self.connectivities | self._implicit_offset_provider
+        )  # tables are None at this point
 
         sdfg = self.backend.executor.otf_workflow.step.translation.generate_sdfg(  # type: ignore[union-attr]
             self.itir,
diff --git a/src/gt4py/next/program_processors/runners/gtfn.py b/src/gt4py/next/program_processors/runners/gtfn.py
index afd7b7af15..95e72983a8 100644
--- a/src/gt4py/next/program_processors/runners/gtfn.py
+++ b/src/gt4py/next/program_processors/runners/gtfn.py
@@ -101,7 +101,10 @@ def compilation_hash(otf_closure: stages.ProgramCall) -> int:
             # As the frontend types contain lists they are not hashable. As a workaround we just
             # use content_hash here.
             content_hash(tuple(from_value(arg) for arg in otf_closure.args)),
-            id(offset_provider) if offset_provider else None,
+            # Directly using the `id` of the offset provider is not possible as the decorator adds
+            # the implicitly defined ones (i.e. to allow the `TDim + 1` syntax) resulting in a
+            # different `id` every time. Instead use the `id` of each individual offset provider.
+            tuple((k, id(v)) for (k, v) in offset_provider.items()) if offset_provider else None,
             otf_closure.kwargs.get("column_axis", None),
         )
     )
diff --git a/src/gt4py/next/type_system/type_info.py b/src/gt4py/next/type_system/type_info.py
index 372d21613e..5ae820e969 100644
--- a/src/gt4py/next/type_system/type_info.py
+++ b/src/gt4py/next/type_system/type_info.py
@@ -718,20 +718,23 @@ def function_signature_incompatibilities_field(
     args: list[ts.TypeSpec],
     kwargs: dict[str, ts.TypeSpec],
 ) -> Iterator[str]:
-    if len(args) != 1:
-        yield f"Function takes 1 argument, but {len(args)} were given."
-        return
-
-    if not isinstance(args[0], ts.OffsetType):
-        yield f"Expected first argument to be of type '{ts.OffsetType}', got '{args[0]}'."
+    if len(args) < 1:
+        yield f"Function takes at least 1 argument, but {len(args)} were given."
         return
+    for arg in args:
+        if not isinstance(arg, ts.OffsetType):
+            yield f"Expected arguments to be of type '{ts.OffsetType}', got '{arg}'."
+            return
+        if len(args) > 1 and len(arg.target) > 1:
+            yield f"Function takes only 1 argument in unstructured case, but {len(args)} were given."
+            return
 
     if kwargs:
         yield f"Got unexpected keyword argument(s) '{', '.join(kwargs.keys())}'."
         return
 
-    source_dim = args[0].source
-    target_dims = args[0].target
+    source_dim = args[0].source  # type: ignore[attr-defined] # ensured by loop above
+    target_dims = args[0].target  # type: ignore[attr-defined] # ensured by loop above
     # TODO: This code does not handle ellipses for dimensions. Fix it.
     assert field_type.dims is not ...
     if field_type.dims and source_dim not in field_type.dims:
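The `compilation_hash` change can be illustrated with plain dictionaries: merging the implicit providers creates a fresh dict on every call, so the `id` of the whole mapping is unstable while the per-entry ids are not (a minimal sketch)::

    d1 = user_offset_provider | implicit_offset_provider
    d2 = user_offset_provider | implicit_offset_provider

    id(d1) == id(d2)  # False: a new dict object on every call
    tuple((k, id(v)) for k, v in d1.items()) == tuple((k, id(v)) for k, v in d2.items())  # True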
diff --git a/tests/next_tests/integration_tests/cases.py b/tests/next_tests/integration_tests/cases.py
index dd75c2645f..ecc466cc43 100644
--- a/tests/next_tests/integration_tests/cases.py
+++ b/tests/next_tests/integration_tests/cases.py
@@ -466,7 +466,11 @@ def verify_with_default_data(
 def cartesian_case(exec_alloc_descriptor: test_definitions.ExecutionAndAllocatorDescriptor):
     yield Case(
         exec_alloc_descriptor if exec_alloc_descriptor.executor else None,
-        offset_provider={"Ioff": IDim, "Joff": JDim, "Koff": KDim},
+        offset_provider={
+            "Ioff": IDim,
+            "Joff": JDim,
+            "Koff": KDim,
+        },
         default_sizes={IDim: 10, JDim: 10, KDim: 10},
         grid_type=common.GridType.CARTESIAN,
         allocator=exec_alloc_descriptor.allocator,
diff --git a/tests/next_tests/integration_tests/feature_tests/ffront_tests/test_execution.py b/tests/next_tests/integration_tests/feature_tests/ffront_tests/test_execution.py
index 26825f9e4c..f8c806a06b 100644
--- a/tests/next_tests/integration_tests/feature_tests/ffront_tests/test_execution.py
+++ b/tests/next_tests/integration_tests/feature_tests/ffront_tests/test_execution.py
@@ -443,6 +443,8 @@ def test_offset_field(cartesian_case):
     @gtx.field_operator
     def testee(a: cases.IKField, offset_field: cases.IKField) -> gtx.Field[[IDim, KDim], bool]:
         a_i = a(as_offset(Ioff, offset_field))
+        # note: this leads to an access to offset_field in
+        # IDim: (0, out.size[I]), KDim: (0, out.size[K]+1)
         a_i_k = a_i(as_offset(Koff, offset_field))
         b_i = a(Ioff[1])
         b_i_k = b_i(Koff[1])
@@ -450,9 +452,11 @@ def testee(a: cases.IKField, offset_field: cases.IKField) -> gtx.Field[[IDim, KD
 
     out = cases.allocate(cartesian_case, testee, cases.RETURN)()
     a = cases.allocate(cartesian_case, testee, "a").extend({IDim: (0, 1), KDim: (0, 1)})()
-    offset_field = cases.allocate(cartesian_case, testee, "offset_field").strategy(
-        cases.ConstInitializer(1)
-    )()
+    offset_field = (
+        cases.allocate(cartesian_case, testee, "offset_field")
+        .strategy(cases.ConstInitializer(1))
+        .extend({KDim: (0, 1)})()
+    )  # see comment at a_i_k for domain bounds
 
     cases.verify(
         cartesian_case,
@@ -461,12 +465,10 @@ def testee(a: cases.IKField, offset_field: cases.IKField) -> gtx.Field[[IDim, KD
         offset_field,
         out=out,
         offset_provider={"Ioff": IDim, "Koff": KDim},
-        ref=np.full_like(offset_field.asnumpy(), True, dtype=bool),
+        ref=ref,
         comparison=lambda out, ref: np.all(out == ref),
     )
 
-    assert np.allclose(out.asnumpy(), ref)
-
 
 def test_nested_tuple_return(cartesian_case):
     @gtx.field_operator
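The extra `KDim` extension of `offset_field` follows from composing the two `as_offset` shifts; with the constant offset of 1 the accesses are, schematically (an index sketch, not runnable test code)::

    # a_i[i, k]   reads a[i + offset_field[i, k], k]
    # a_i_k[i, k] reads a_i[i, k + offset_field[i, k]]
    # evaluating a_i at k + 1 in turn reads offset_field[i, k + 1],
    # so offset_field needs one extra level in KDim, just like `a`.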
diff --git a/tests/next_tests/integration_tests/multi_feature_tests/ffront_tests/test_laplacian.py b/tests/next_tests/integration_tests/multi_feature_tests/ffront_tests/test_laplacian.py
index 854957afeb..d44da6ff7e 100644
--- a/tests/next_tests/integration_tests/multi_feature_tests/ffront_tests/test_laplacian.py
+++ b/tests/next_tests/integration_tests/multi_feature_tests/ffront_tests/test_laplacian.py
@@ -17,7 +17,6 @@
     exec_alloc_descriptor,
 )
 
-
 pytestmark = pytest.mark.uses_cartesian_shift
 
 
@@ -25,10 +24,21 @@ def lap(in_field: gtx.Field[[IDim, JDim], "float"]) -> gtx.Field[[IDim, JDim], "float"]:
     return (
         -4.0 * in_field
-        + in_field(Ioff[1])
-        + in_field(Joff[1])
-        + in_field(Ioff[-1])
-        + in_field(Joff[-1])
+        + in_field(IDim + 1)
+        + in_field(JDim + 1)
+        + in_field(IDim - 1)
+        + in_field(JDim - 1)
+    )
+
+
+@gtx.field_operator
+def skewedlap(in_field: gtx.Field[[IDim, JDim], "float"]) -> gtx.Field[[IDim, JDim], "float"]:
+    return (
+        -4.0 * in_field
+        + in_field(IDim + 1, JDim + 1)
+        + in_field(IDim + 1, JDim - 1)
+        + in_field(IDim - 1, JDim + 1)
+        + in_field(IDim - 1, JDim - 1)
     )
@@ -44,6 +54,14 @@ def lap_program(
     in_field: gtx.Field[[IDim, JDim], "float"],
     out_field: gtx.Field[[IDim, JDim], "float"],
 ):
     lap(in_field, out=out_field[1:-1, 1:-1])
 
 
+@gtx.program
+def skewedlap_program(
+    in_field: gtx.Field[[IDim, JDim], "float"],
+    out_field: gtx.Field[[IDim, JDim], "float"],
+):
+    skewedlap(in_field, out=out_field[1:-1, 1:-1])
+
+
 @gtx.program
 def laplap_program(
     in_field: gtx.Field[[IDim, JDim], "float"], out_field: gtx.Field[[IDim, JDim], "float"]
@@ -51,13 +69,24 @@ def laplap_program(
     laplap(in_field, out=out_field[2:-2, 2:-2])
 
 
+def square(inp):
+    """Compute the square of the field entries."""
+    return inp[:, :] * inp[:, :]
+
+
 def lap_ref(inp):
     """Compute the laplacian using numpy"""
-    return -4.0 * inp[1:-1, 1:-1] + inp[:-2, 1:-1] + inp[2:, 1:-1] + inp[1:-1, :-2] + inp[1:-1, 2:]
+    return -4.0 * inp[1:-1, 1:-1] + inp[2:, 1:-1] + inp[1:-1, 2:] + inp[:-2, 1:-1] + inp[1:-1, :-2]
+
+
+def skewedlap_ref(inp):
+    """Compute the skewed laplacian using numpy"""
+    return -4.0 * inp[1:-1, 1:-1] + inp[2:, 2:] + inp[2:, :-2] + inp[:-2, 2:] + inp[:-2, :-2]
 
 
 def test_ffront_lap(cartesian_case):
     in_field = cases.allocate(cartesian_case, lap_program, "in_field")()
+    in_field = square(in_field)
     out_field = cases.allocate(cartesian_case, lap_program, "out_field")()
 
     # cases.verify(
     #     cartesian_case,
     #     lap_program,
     #     in_field,
     #     out_field,
     #     inout=out_field[1:-1, 1:-1],
     #     ref=lap_ref(in_field.ndarray),
     # )
+
+def test_ffront_skewedlap(cartesian_case):
+    in_field = cases.allocate(cartesian_case, skewedlap_program, "in_field")()
+    in_field = square(in_field)
+    out_field = cases.allocate(cartesian_case, skewedlap_program, "out_field")()
+
+    cases.verify(
+        cartesian_case,
+        skewedlap_program,
+        in_field,
+        out_field,
+        inout=out_field[1:-1, 1:-1],
+        ref=skewedlap_ref(in_field.ndarray),
+    )
+
+
+def test_ffront_laplap(cartesian_case):
     in_field = cases.allocate(cartesian_case, laplap_program, "in_field")()
+    in_field = square(in_field)
     out_field = cases.allocate(cartesian_case, laplap_program, "out_field")()
 
     cases.verify(
diff --git a/tests/next_tests/unit_tests/ffront_tests/test_foast_to_gtir.py b/tests/next_tests/unit_tests/ffront_tests/test_foast_to_gtir.py
index 367d536322..978bd3f7cc 100644
--- a/tests/next_tests/unit_tests/ffront_tests/test_foast_to_gtir.py
+++ b/tests/next_tests/unit_tests/ffront_tests/test_foast_to_gtir.py
@@ -27,6 +27,7 @@
     min_over,
     neighbor_sum,
     where,
+    common,
 )
 from gt4py.next.ffront import type_specifications as ts_ffront
 from gt4py.next.ffront.ast_passes import single_static_assign as ssa
@@ -119,6 +120,22 @@ def foo(inp: gtx.Field[[TDim], float64]):
     assert lowered.expr == reference
 
 
+def test_premap_cartesian_syntax():
+    def foo(inp: gtx.Field[[TDim], float64]):
+        return inp(TDim + 1)
+
+    parsed = FieldOperatorParser.apply_to_function(foo)
+    lowered = FieldOperatorLowering.apply(parsed)
+
+    reference = im.as_fieldop(
+        im.lambda_("__it")(
+            im.deref(im.shift(common.dimension_to_implicit_offset(TDim.value), 1)("__it"))
+        )
+    )("inp")
+
+    assert lowered.expr == reference
+
+
 def test_as_offset():
     def foo(inp: gtx.Field[[TDim], float64], offset: gtx.Field[[TDim], int]):
         return inp(as_offset(TOff, offset))
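Taken together, user code can now shift by a dimension directly and rely on the decorator to inject the implicit offset provider; a minimal usage sketch (hypothetical names, embedded execution assumed)::

    import numpy as np
    import gt4py.next as gtx

    IDim = gtx.Dimension("I")

    @gtx.field_operator
    def shift_left(inp: gtx.Field[[IDim], "float"]) -> gtx.Field[[IDim], "float"]:
        return inp(IDim + 1)  # no explicitly declared FieldOffset needed

    @gtx.program
    def shift_left_program(inp: gtx.Field[[IDim], "float"], out: gtx.Field[[IDim], "float"]):
        shift_left(inp, out=out[:-1])

    inp = gtx.as_field([IDim], np.arange(10.0))
    out = gtx.as_field([IDim], np.zeros(10))
    # no "Ioff"-style entry required; "_IOff" is supplied by `_implicit_offset_provider`
    shift_left_program(inp, out, offset_provider={})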