From ab0d584b8531eb9e1ab2279cfd61c3fc440239d8 Mon Sep 17 00:00:00 2001 From: Addison Alvey-Blanco Date: Mon, 4 Mar 2024 13:52:31 -0600 Subject: [PATCH 1/6] Initial axis tagging machinery --- grudge/array_context.py | 77 ++++++++++++++++--------------------- grudge/op.py | 84 ++++++++++++++++------------------------- 2 files changed, 64 insertions(+), 97 deletions(-) diff --git a/grudge/array_context.py b/grudge/array_context.py index 7d6e9106f..0a55f628e 100644 --- a/grudge/array_context.py +++ b/grudge/array_context.py @@ -38,6 +38,7 @@ from dataclasses import dataclass from pytools.tag import Tag from meshmode.array_context import ( + DiscretizationDOFAxisTag, PyOpenCLArrayContext as _PyOpenCLArrayContextBase, PytatoPyOpenCLArrayContext as _PytatoPyOpenCLArrayContextBase) from warnings import warn @@ -124,29 +125,29 @@ def __init__(self, queue: "pyopencl.CommandQueue", super().__init__(queue, allocator, wait_event_queue_length, force_device_scalars) - def transform_loopy_program(self, t_unit): - knl = t_unit.default_entrypoint - - # {{{ process tensor product specific metadata - - if knl.tags_of_type(OutputIsTensorProductDOFArrayOrdered): - new_args = [] - for arg in knl.args: - if arg.is_output: - arg = arg.copy(dim_tags=( - f"N{len(arg.shape)-1}," - + ",".join(f"N{i}" - for i in range(len(arg.shape)-1)) - )) - - new_args.append(arg) - - knl = knl.copy(args=new_args) - t_unit = t_unit.with_kernel(knl) - - # }}} - - return super().transform_loopy_program(t_unit) + # def transform_loopy_program(self, t_unit): + # knl = t_unit.default_entrypoint + # + # # {{{ process tensor product specific metadata + # + # if knl.tags_of_type(OutputIsTensorProductDOFArrayOrdered): + # new_args = [] + # for arg in knl.args: + # if arg.is_output: + # arg = arg.copy(dim_tags=( + # f"N{len(arg.shape)-1}," + # + ",".join(f"N{i}" + # for i in range(len(arg.shape)-1)) + # )) + # + # new_args.append(arg) + # + # knl = knl.copy(args=new_args) + # t_unit = t_unit.with_kernel(knl) + # + # # }}} + # + # return super().transform_loopy_program(t_unit) # }}} @@ -176,28 +177,6 @@ def __init__(self, queue, allocator=None, super().__init__(queue, allocator, compile_trace_callback=compile_trace_callback) - def transform_loopy_program(self, t_unit): - knl = t_unit.default_entrypoint - - # {{{ process tensor product specific metadata - - if knl.tags_of_type(OutputIsTensorProductDOFArrayOrdered): - new_args = [] - for arg in knl.args: - if arg.is_output: - arg = arg.copy(dim_tags=( - f"N{len(arg.shape)-1}," - + ",".join(f"N{i}" - for i in range(len(arg.shape)-1)) - )) - - new_args.append(arg) - - knl = knl.copy(args=new_args) - - # }}} - - return super().transform_loopy_program(t_unit) # }}} @@ -694,12 +673,20 @@ class OutputIsTensorProductDOFArrayOrdered(Tag): pass +class TensorProductDOFAxis(DiscretizationDOFAxisTag): + """ + TODO: Add doc + """ + pass + + class MassMatrix1d(Tag): """Used in DAG transformation to realize algebraic simplification of 1D inverse mass operator times mass operator. """ pass + class InverseMassMatrix1d(Tag): """See MassMatrix1d. 
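# ---------------------------------------------------------------------------
# Illustration (not part of the patch): the dim_tags string that the
# transform_loopy_program hook above builds for tensor-product output
# arguments.  "ndim" is a stand-in for len(arg.shape); this only reproduces
# the string construction so the intended axis nesting is easy to see for a
# concrete rank.
def tp_output_dim_tags(ndim: int) -> str:
    # last nesting tag first, then the remaining tags in their original order
    return (f"N{ndim - 1},"
            + ",".join(f"N{i}" for i in range(ndim - 1)))

# a (nelements, n1d, n1d, n1d) output argument in 3D:
assert tp_output_dim_tags(4) == "N3,N0,N1,N2"
# ---------------------------------------------------------------------------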
""" diff --git a/grudge/op.py b/grudge/op.py index 93b55b11f..bf21af34f 100644 --- a/grudge/op.py +++ b/grudge/op.py @@ -89,7 +89,7 @@ from grudge.discretization import DiscretizationCollection from grudge.dof_desc import as_dofdesc -from grudge.array_context import OutputIsTensorProductDOFArrayOrdered +from grudge.array_context import OutputIsTensorProductDOFArrayOrdered, TensorProductDOFAxis from pytools import keyed_memoize_in from pytools.obj_array import make_obj_array @@ -202,8 +202,11 @@ def _single_axis_derivative_kernel( def compute_tensor_product_derivative(actx, grp, get_diff_mat, vec, ijm, xyz_axis, metric_in_matvec): - - vec = fold(grp.space, vec) + vec = tag_axes( + actx, + { i: TensorProductDOFAxis() for i in range(1, grp.dim+1) }, + fold(grp.space, vec) + ) if metric_in_matvec: stiff_1d, mass_1d = get_diff_mat(actx, grp, grp) @@ -213,8 +216,7 @@ def compute_tensor_product_derivative(actx, grp, get_diff_mat, vec, ijm, for ax in apply_mass_axes: vec_mass_applied = single_axis_operator_application( actx, grp.dim, mass_1d, ax, vec, - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tags=(FirstAxisIsElementsTag(),), arg_names=("mass_1d", "vec") ) @@ -222,8 +224,7 @@ def compute_tensor_product_derivative(actx, grp, get_diff_mat, vec, ijm, grp.space, single_axis_operator_application( actx, grp.dim, stiff_1d, xyz_axis, vec_mass_applied, - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tags=(FirstAxisIsElementsTag(),), arg_names=("stiff_1d", "vec_with_mass_applied")) ) @@ -242,8 +243,7 @@ def compute_tensor_product_derivative(actx, grp, get_diff_mat, vec, ijm, grp.space, single_axis_operator_application( actx, grp.dim, diff_mat, xyz_axis, vec, - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tags=(FirstAxisIsElementsTag(),), arg_names=("diff_mat", "vec")) ) @@ -304,21 +304,12 @@ def _gradient_kernel(actx, out_discr, in_discr, get_diff_mat, inv_jac_mat, vec, def compute_tensor_product_grad(actx, grp, diff_mat, vec, ijm, metric_in_matvec): - # TODO: add note about inverse mass simplification, point to - # op.inverse_mass (assuming this is where the explanation will live) - """ - """ - - if grp.dim > 3 and metric_in_matvec: - warn('Efficient tensor product weak ' - 'differentiation operators only ' - 'implemented for dimension 2 and 3. 
' - 'Defaulting to inefficient version.') - return compute_simplicial_grad(actx, grp, grp, diff_mat, vec, ijm, - metric_in_matvec) - # reshape vector to expose tensor product structure - vec = fold(grp.space, vec) + vec = tag_axes( + actx, + { i: TensorProductDOFAxis() for i in range(1, grp.dim+1) }, + fold(grp.space, vec) + ) if metric_in_matvec: stiff_1d, mass_1d = get_diff_mat(actx, grp, grp) @@ -332,8 +323,7 @@ def compute_tensor_product_grad(actx, grp, diff_mat, vec, ijm, for ax in apply_mass_axes: grad[xyz_axis] = single_axis_operator_application( actx, grp.dim, mass_1d, ax, grad[xyz_axis], - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tags=(FirstAxisIsElementsTag(),), arg_names=("mass_1d", f"vec_{xyz_axis}") ) @@ -342,8 +332,7 @@ def compute_tensor_product_grad(actx, grp, diff_mat, vec, ijm, grp.space, single_axis_operator_application( actx, grp.dim, stiff_1d, xyz_axis, grad[xyz_axis], - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tags=(FirstAxisIsElementsTag(),), arg_names=("stiff_1d", f"vec_{xyz_axis}")) ) @@ -357,8 +346,7 @@ def compute_tensor_product_grad(actx, grp, diff_mat, vec, ijm, grp.space, single_axis_operator_application( actx, grp.dim, diff_mat, xyz_axis, grad[xyz_axis], - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tags=(FirstAxisIsElementsTag(),), arg_names=("diff_mat", f"vec_{xyz_axis}") ) ) @@ -421,21 +409,12 @@ def _divergence_kernel(actx, out_discr, in_discr, get_diff_mat, inv_jac_mat, vec # {{{ tensor product div def compute_tensor_product_div(actx, grp, diff_mat, vec, ijm): - """ - Exploits tensor product structure to reduce complexity. See - `_gradient_kernel.compute_tensor_product_grad` for more details. - """ - - if grp.dim > 3 and metric_in_matvec: - warn('Efficient tensor product weak ' - 'differentiation operators only ' - 'implemented for dimension 2 and 3. 
' - 'Defaulting to inefficient version.') - return compute_simplicial_div(actx, grp, grp, diff_mat, vec, ijm, - metric_in_matvec) - vec = make_obj_array([ - fold(grp.space, vec[func_axis]) + tag_axes( + actx, + { i : TensorProductDOFAxis() for i in range(1, grp.dim+1) }, + fold(grp.space, vec[func_axis]) + ) for func_axis in range(vec.shape[0]) ]) @@ -452,15 +431,13 @@ def compute_tensor_product_div(actx, grp, diff_mat, vec, ijm): for ax in apply_mass_axes: ref[xyz_axis] = single_axis_operator_application( actx, grp.dim, mass_1d, ax, ref[xyz_axis], - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tags=(FirstAxisIsElementsTag(),), arg_names=("mass_1d", f"vec_{func_axis}_{xyz_axis}") ) ref[xyz_axis] = single_axis_operator_application( actx, grp.dim, stiff_1d, xyz_axis, ref[xyz_axis], - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tags=(FirstAxisIsElementsTag(),), arg_names=("stiff_1d", f"vec_{func_axis}_{xyz_axis}") ) @@ -477,8 +454,7 @@ def compute_tensor_product_div(actx, grp, diff_mat, vec, ijm): ref[xyz_axis] = single_axis_operator_application( actx, grp.dim, diff_mat, xyz_axis, ref[xyz_axis], - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tags=(FirstAxisIsElementsTag(),), arg_names=("diff_mat", f"vec_{func_axis}_{xyz_axis}") ) @@ -1488,9 +1464,13 @@ def single_axis_operator_application(actx, dim, operator, axis, data, spec = operator_spec + ',' + data_spec + '->' + out_spec - return actx.einsum(spec, operator, data, - arg_names=arg_names, - tagged=tags) + return tag_axes( + actx, + { i : TensorProductDOFAxis() for i in range(1, dim+1) }, + actx.einsum(spec, operator, data, + arg_names=arg_names, + tagged=tags) + ) # }}} From 6f9232675c764b497f9f36bfc3b201387bb520cb Mon Sep 17 00:00:00 2001 From: Addison Alvey-Blanco Date: Tue, 5 Mar 2024 10:13:06 -0600 Subject: [PATCH 2/6] Add temporary eager tag. Flag eager tag for removal --- grudge/array_context.py | 5 ++++- grudge/op.py | 25 +++++++++++++++++-------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/grudge/array_context.py b/grudge/array_context.py index 0a55f628e..653c2fce5 100644 --- a/grudge/array_context.py +++ b/grudge/array_context.py @@ -662,6 +662,8 @@ def clone(self): # {{{ tensor product-specific machinery +# FIXME: This is not permanent, but is necessary for eager evaluation in the +# current state of `actx.einsum` class OutputIsTensorProductDOFArrayOrdered(Tag): """Signify that the strides will not be of order "C" or "F". @@ -675,7 +677,8 @@ class OutputIsTensorProductDOFArrayOrdered(Tag): class TensorProductDOFAxis(DiscretizationDOFAxisTag): """ - TODO: Add doc + Signify that an axis contains DOF data and belongs to a tensor product + discretization. 
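# ---------------------------------------------------------------------------
# Illustration (string stand-ins, not the real Tag classes): which tag each
# axis of a folded DOF array receives under the tagging scheme used above.
# Axis 0 is the element axis; axes 1..dim are the per-dimension 1D DOF axes.
def folded_axis_tags(dim: int) -> dict:
    tags = {0: "DiscretizationElementAxisTag"}
    tags.update({i: "TensorProductDOFAxisTag" for i in range(1, dim + 1)})
    return tags

assert folded_axis_tags(3) == {
    0: "DiscretizationElementAxisTag",
    1: "TensorProductDOFAxisTag",
    2: "TensorProductDOFAxisTag",
    3: "TensorProductDOFAxisTag",
}
# ---------------------------------------------------------------------------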
""" pass diff --git a/grudge/op.py b/grudge/op.py index bf21af34f..d60110259 100644 --- a/grudge/op.py +++ b/grudge/op.py @@ -216,7 +216,8 @@ def compute_tensor_product_derivative(actx, grp, get_diff_mat, vec, ijm, for ax in apply_mass_axes: vec_mass_applied = single_axis_operator_application( actx, grp.dim, mass_1d, ax, vec, - tags=(FirstAxisIsElementsTag(),), + tags=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("mass_1d", "vec") ) @@ -224,7 +225,8 @@ def compute_tensor_product_derivative(actx, grp, get_diff_mat, vec, ijm, grp.space, single_axis_operator_application( actx, grp.dim, stiff_1d, xyz_axis, vec_mass_applied, - tags=(FirstAxisIsElementsTag(),), + tags=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("stiff_1d", "vec_with_mass_applied")) ) @@ -243,7 +245,8 @@ def compute_tensor_product_derivative(actx, grp, get_diff_mat, vec, ijm, grp.space, single_axis_operator_application( actx, grp.dim, diff_mat, xyz_axis, vec, - tags=(FirstAxisIsElementsTag(),), + tags=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("diff_mat", "vec")) ) @@ -323,7 +326,8 @@ def compute_tensor_product_grad(actx, grp, diff_mat, vec, ijm, for ax in apply_mass_axes: grad[xyz_axis] = single_axis_operator_application( actx, grp.dim, mass_1d, ax, grad[xyz_axis], - tags=(FirstAxisIsElementsTag(),), + tags=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("mass_1d", f"vec_{xyz_axis}") ) @@ -346,7 +350,8 @@ def compute_tensor_product_grad(actx, grp, diff_mat, vec, ijm, grp.space, single_axis_operator_application( actx, grp.dim, diff_mat, xyz_axis, grad[xyz_axis], - tags=(FirstAxisIsElementsTag(),), + tags=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("diff_mat", f"vec_{xyz_axis}") ) ) @@ -431,13 +436,15 @@ def compute_tensor_product_div(actx, grp, diff_mat, vec, ijm): for ax in apply_mass_axes: ref[xyz_axis] = single_axis_operator_application( actx, grp.dim, mass_1d, ax, ref[xyz_axis], - tags=(FirstAxisIsElementsTag(),), + tags=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("mass_1d", f"vec_{func_axis}_{xyz_axis}") ) ref[xyz_axis] = single_axis_operator_application( actx, grp.dim, stiff_1d, xyz_axis, ref[xyz_axis], - tags=(FirstAxisIsElementsTag(),), + tags=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("stiff_1d", f"vec_{func_axis}_{xyz_axis}") ) @@ -454,7 +461,8 @@ def compute_tensor_product_div(actx, grp, diff_mat, vec, ijm): ref[xyz_axis] = single_axis_operator_application( actx, grp.dim, diff_mat, xyz_axis, ref[xyz_axis], - tags=(FirstAxisIsElementsTag(),), + tags=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("diff_mat", f"vec_{func_axis}_{xyz_axis}") ) @@ -476,6 +484,7 @@ def compute_tensor_product_div(actx, grp, diff_mat, vec, ijm): ) return div + # }}} From 9b1ddb32e79bb0969aa487c3a85d97fb1abebb4c Mon Sep 17 00:00:00 2001 From: Addison Alvey-Blanco Date: Tue, 5 Mar 2024 12:29:47 -0600 Subject: [PATCH 3/6] Add missing eager tag back into grad. 
Minor formatting fix --- grudge/array_context.py | 50 ++++++++++++++++++++++------------------- grudge/op.py | 3 ++- 2 files changed, 29 insertions(+), 24 deletions(-) diff --git a/grudge/array_context.py b/grudge/array_context.py index 653c2fce5..3da703efb 100644 --- a/grudge/array_context.py +++ b/grudge/array_context.py @@ -107,6 +107,10 @@ import pyopencl.tools from mpi4py import MPI +# }}} + + +# {{{ pyopencl class PyOpenCLArrayContext(_PyOpenCLArrayContextBase): """Inherits from :class:`meshmode.array_context.PyOpenCLArrayContext`. Extends it @@ -125,29 +129,29 @@ def __init__(self, queue: "pyopencl.CommandQueue", super().__init__(queue, allocator, wait_event_queue_length, force_device_scalars) - # def transform_loopy_program(self, t_unit): - # knl = t_unit.default_entrypoint - # - # # {{{ process tensor product specific metadata - # - # if knl.tags_of_type(OutputIsTensorProductDOFArrayOrdered): - # new_args = [] - # for arg in knl.args: - # if arg.is_output: - # arg = arg.copy(dim_tags=( - # f"N{len(arg.shape)-1}," - # + ",".join(f"N{i}" - # for i in range(len(arg.shape)-1)) - # )) - # - # new_args.append(arg) - # - # knl = knl.copy(args=new_args) - # t_unit = t_unit.with_kernel(knl) - # - # # }}} - # - # return super().transform_loopy_program(t_unit) + def transform_loopy_program(self, t_unit): + knl = t_unit.default_entrypoint + + # {{{ process tensor product specific metadata + + if knl.tags_of_type(OutputIsTensorProductDOFArrayOrdered): + new_args = [] + for arg in knl.args: + if arg.is_output: + arg = arg.copy(dim_tags=( + f"N{len(arg.shape)-1}," + + ",".join(f"N{i}" + for i in range(len(arg.shape)-1)) + )) + + new_args.append(arg) + + knl = knl.copy(args=new_args) + t_unit = t_unit.with_kernel(knl) + + # }}} + + return super().transform_loopy_program(t_unit) # }}} diff --git a/grudge/op.py b/grudge/op.py index d60110259..355ef6749 100644 --- a/grudge/op.py +++ b/grudge/op.py @@ -336,7 +336,8 @@ def compute_tensor_product_grad(actx, grp, diff_mat, vec, ijm, grp.space, single_axis_operator_application( actx, grp.dim, stiff_1d, xyz_axis, grad[xyz_axis], - tags=(FirstAxisIsElementsTag(),), + tags=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("stiff_1d", f"vec_{xyz_axis}")) ) From 20aef783c06b2816eda6bca24763f859c61a2f0c Mon Sep 17 00:00:00 2001 From: Addison Alvey-Blanco Date: Tue, 23 Apr 2024 10:16:44 -0500 Subject: [PATCH 4/6] Update how tagging is handled. Add dirs/files for xformations and metadata --- grudge/array_context.py | 2 +- grudge/op.py | 187 +++++++++++++++++++++-------------- grudge/transform/metadata.py | 73 ++++++++++++++ 3 files changed, 187 insertions(+), 75 deletions(-) create mode 100644 grudge/transform/metadata.py diff --git a/grudge/array_context.py b/grudge/array_context.py index 3da703efb..b942d5315 100644 --- a/grudge/array_context.py +++ b/grudge/array_context.py @@ -679,7 +679,7 @@ class OutputIsTensorProductDOFArrayOrdered(Tag): pass -class TensorProductDOFAxis(DiscretizationDOFAxisTag): +class TensorProductDOFAxisTag(DiscretizationDOFAxisTag): """ Signify that an axis contains DOF data and belongs to a tensor product discretization. 
diff --git a/grudge/op.py b/grudge/op.py index 355ef6749..364e5bf9f 100644 --- a/grudge/op.py +++ b/grudge/op.py @@ -81,7 +81,8 @@ from meshmode.transform_metadata import (FirstAxisIsElementsTag, DiscretizationDOFAxisTag, DiscretizationElementAxisTag, - DiscretizationFaceAxisTag) + DiscretizationFaceAxisTag, + DiscretizationAmbientDimAxisTag) from modepy.tools import ( reshape_array_for_tensor_product_space as fold, @@ -89,7 +90,13 @@ from grudge.discretization import DiscretizationCollection from grudge.dof_desc import as_dofdesc -from grudge.array_context import OutputIsTensorProductDOFArrayOrdered, TensorProductDOFAxis +from grudge.transform.metadata import ( + OutputIsTensorProductDOFArrayOrdered, + TensorProductDOFAxisTag, + TensorProductOperatorAxisTag, + ReferenceTensorProductMassOperatorTag as MassMatrix1DTag, + ReferenceTensorProductInverseMassOperatorTag as InverseMassMatrix1DTag +) from pytools import keyed_memoize_in from pytools.obj_array import make_obj_array @@ -185,6 +192,8 @@ # {{{ common derivative "kernels" +# {{{ single axis derivative + def _single_axis_derivative_kernel( actx, out_discr, in_discr, get_diff_mat, inv_jac_mat, xyz_axis, vec, *, metric_in_matvec): @@ -202,11 +211,8 @@ def _single_axis_derivative_kernel( def compute_tensor_product_derivative(actx, grp, get_diff_mat, vec, ijm, xyz_axis, metric_in_matvec): - vec = tag_axes( - actx, - { i: TensorProductDOFAxis() for i in range(1, grp.dim+1) }, - fold(grp.space, vec) - ) + + vec = fold(grp.space, vec) if metric_in_matvec: stiff_1d, mass_1d = get_diff_mat(actx, grp, grp) @@ -216,8 +222,8 @@ def compute_tensor_product_derivative(actx, grp, get_diff_mat, vec, ijm, for ax in apply_mass_axes: vec_mass_applied = single_axis_operator_application( actx, grp.dim, mass_1d, ax, vec, - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tagged=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("mass_1d", "vec") ) @@ -225,8 +231,8 @@ def compute_tensor_product_derivative(actx, grp, get_diff_mat, vec, ijm, grp.space, single_axis_operator_application( actx, grp.dim, stiff_1d, xyz_axis, vec_mass_applied, - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tagged=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("stiff_1d", "vec_with_mass_applied")) ) @@ -245,8 +251,8 @@ def compute_tensor_product_derivative(actx, grp, get_diff_mat, vec, ijm, grp.space, single_axis_operator_application( actx, grp.dim, diff_mat, xyz_axis, vec, - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tagged=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("diff_mat", "vec")) ) @@ -296,23 +302,27 @@ def compute_simplicial_derivative(actx, in_grp, out_grp, out_discr.groups, in_discr.groups, vec, inv_jac_mat))) +# }}} + + +# {{{ gradient def _gradient_kernel(actx, out_discr, in_discr, get_diff_mat, inv_jac_mat, vec, *, metric_in_matvec): # See _single_axis_derivative_kernel for comments on the usage scenarios # (both strong and weak derivative) and their differences. - - # {{{ tensor product gradient + # {{{ tensor product grad def compute_tensor_product_grad(actx, grp, diff_mat, vec, ijm, metric_in_matvec): + """ + Applies 1D operators one-axis-at-a-time to tensor-product discretized + DOF data. 
+ """ + # reshape vector to expose tensor product structure - vec = tag_axes( - actx, - { i: TensorProductDOFAxis() for i in range(1, grp.dim+1) }, - fold(grp.space, vec) - ) + vec = fold(grp.space, vec) if metric_in_matvec: stiff_1d, mass_1d = get_diff_mat(actx, grp, grp) @@ -326,8 +336,8 @@ def compute_tensor_product_grad(actx, grp, diff_mat, vec, ijm, for ax in apply_mass_axes: grad[xyz_axis] = single_axis_operator_application( actx, grp.dim, mass_1d, ax, grad[xyz_axis], - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tagged=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("mass_1d", f"vec_{xyz_axis}") ) @@ -336,8 +346,8 @@ def compute_tensor_product_grad(actx, grp, diff_mat, vec, ijm, grp.space, single_axis_operator_application( actx, grp.dim, stiff_1d, xyz_axis, grad[xyz_axis], - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tagged=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("stiff_1d", f"vec_{xyz_axis}")) ) @@ -351,20 +361,27 @@ def compute_tensor_product_grad(actx, grp, diff_mat, vec, ijm, grp.space, single_axis_operator_application( actx, grp.dim, diff_mat, xyz_axis, grad[xyz_axis], - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tagged=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("diff_mat", f"vec_{xyz_axis}") ) ) grad = actx.np.stack(grad) - return actx.einsum( + return tag_axes( + actx, + { + 0: DiscretizationAmbientDimAxisTag(), + 1: DiscretizationElementAxisTag(), + 2: DiscretizationDOFAxisTag() + }, + actx.einsum( "xrej,rej->xej", ijm, grad, tagged=(FirstAxisIsElementsTag(),), - arg_names=("inv_jac_t", f"grad") - ) + arg_names=("inv_jac_t", f"vec") + )) # }}} @@ -405,6 +422,10 @@ def compute_simplicial_grad(actx, in_grp, out_grp, get_diff_mat, vec_i, actx, data=tuple([pgg_i[xyz_axis] for pgg_i in per_group_grads])) for xyz_axis in range(out_discr.ambient_dim)]) +# }}} + + +# {{{ divergence def _divergence_kernel(actx, out_discr, in_discr, get_diff_mat, inv_jac_mat, vec, *, metric_in_matvec): @@ -415,12 +436,13 @@ def _divergence_kernel(actx, out_discr, in_discr, get_diff_mat, inv_jac_mat, vec # {{{ tensor product div def compute_tensor_product_div(actx, grp, diff_mat, vec, ijm): + """ + Exploits tensor product structure to reduce complexity. See + `_gradient_kernel.compute_tensor_product_grad` for more details. 
+ """ + vec = make_obj_array([ - tag_axes( - actx, - { i : TensorProductDOFAxis() for i in range(1, grp.dim+1) }, - fold(grp.space, vec[func_axis]) - ) + fold(grp.space, vec[func_axis]) for func_axis in range(vec.shape[0]) ]) @@ -437,15 +459,15 @@ def compute_tensor_product_div(actx, grp, diff_mat, vec, ijm): for ax in apply_mass_axes: ref[xyz_axis] = single_axis_operator_application( actx, grp.dim, mass_1d, ax, ref[xyz_axis], - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tagged=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("mass_1d", f"vec_{func_axis}_{xyz_axis}") ) ref[xyz_axis] = single_axis_operator_application( actx, grp.dim, stiff_1d, xyz_axis, ref[xyz_axis], - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tagged=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("stiff_1d", f"vec_{func_axis}_{xyz_axis}") ) @@ -462,8 +484,8 @@ def compute_tensor_product_div(actx, grp, diff_mat, vec, ijm): ref[xyz_axis] = single_axis_operator_application( actx, grp.dim, diff_mat, xyz_axis, ref[xyz_axis], - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tagged=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("diff_mat", f"vec_{func_axis}_{xyz_axis}") ) @@ -486,6 +508,7 @@ def compute_tensor_product_div(actx, grp, diff_mat, vec, ijm): return div + # }}} @@ -527,6 +550,8 @@ def compute_simplicial_div(actx, in_grp, out_grp, get_diff_mat, vec_i, # }}} +# }}} + # {{{ Derivative operators @@ -540,6 +565,7 @@ def _reference_derivative_matrices(actx: ArrayContext, actx, _reference_derivative_matrices, lambda grp: grp.discretization_key()) def get_ref_derivative_mats(grp): + if isinstance(grp, TensorProductElementGroup): import modepy as mp import numpy.linalg as la @@ -554,14 +580,11 @@ def get_ref_derivative_mats(grp): diff_mat = actx.from_numpy(vdm_p_1d @ la.inv(vdm_1d)) from arraycontext.metadata import NameHint - return actx.freeze( - actx.tag(NameHint("tp_diff_mat_1d"), - tag_axes(actx, { - 1: DiscretizationDOFAxisTag()}, - diff_mat))) + return actx.freeze(actx.tag(NameHint("tp_diff_mat_1d"), diff_mat)) elif isinstance(grp, SimplexElementGroup): from meshmode.discretization.poly_element import diff_matrices + return actx.freeze( actx.tag_axis( 1, DiscretizationDOFAxisTag(), @@ -759,18 +782,11 @@ def get_ref_stiffness_transpose_mat(out_grp, in_grp): diff_mat = la.solve(vdm.T, vdm_p.T).T stiff_1d = actx.freeze( - actx.tag_axis(1, DiscretizationDOFAxisTag(), - actx.from_numpy( - np.asarray( - diff_mat.T @ mass_1d.T)))) + actx.from_numpy(np.asarray(diff_mat.T @ mass_1d.T))) - from grudge.array_context import MassMatrix1d mass_1d = actx.freeze( - actx.tag_axis( - 1, (DiscretizationDOFAxisTag(),), - actx.from_numpy(np.asarray(mass_1d))) - ) - mass_1d = actx.tag(MassMatrix1d(), mass_1d) + actx.tag(MassMatrix1DTag(), + actx.from_numpy(np.asarray(mass_1d)))) return (stiff_1d, mass_1d) @@ -1118,18 +1134,17 @@ def get_ref_inv_mass_mat(grp): from modepy import inverse_mass_matrix if isinstance(grp, TensorProductElementGroup): + basis_1d = grp.bases_1d() nodes_1d = grp.unit_nodes_1d + inv_mass_1d = inverse_mass_matrix(basis_1d.functions, nodes_1d) + inv_mass_1d = actx.from_numpy(np.asarray(inv_mass_1d)) - from grudge.array_context import InverseMassMatrix1d - inv_mass_1d = actx.tag_axis(0, DiscretizationDOFAxisTag(), - actx.from_numpy(np.asarray(inv_mass_1d))) - inv_mass_1d = actx.freeze( - actx.tag(InverseMassMatrix1d(), 
inv_mass_1d)) + return actx.freeze(actx.tag(InverseMassMatrix1DTag(), inv_mass_1d)) - return inv_mass_1d elif isinstance(grp, SimplexElementGroup): + basis = grp.basis_obj() return actx.freeze( @@ -1174,8 +1189,8 @@ def apply_to_tensor_product_elements(grp, jac_inv, vec, ref_inv_mass): for xyz_axis in range(grp.dim): vec = single_axis_operator_application( actx, grp.dim, ref_inv_mass, xyz_axis, vec, - tags=(FirstAxisIsElementsTag(), - OutputIsTensorProductDOFArrayOrdered(),), + tagged=(FirstAxisIsElementsTag(), + OutputIsTensorProductDOFArrayOrdered(),), arg_names=("ref_inv_mass_1d", "vec")) vec = unfold(grp.space, vec) @@ -1457,29 +1472,53 @@ def face_mass(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer: # {{{ general single axis operator application -def single_axis_operator_application(actx, dim, operator, axis, data, - arg_names=None, tags=None): +def single_axis_operator_application(actx, dim, operator, axis, vec, + arg_names=None, tagged=None): """ Used for applying 1D operators to a single axis of a tensor of DOF data. """ - if not isinstance(arg_names, tuple): - raise TypeError("arg_names must be a tuple.") - if not isinstance(tags, tuple): + if not isinstance(arg_names, tuple) and arg_names is not None: raise TypeError("arg_names must be a tuple.") + if not isinstance(tagged, tuple) and tagged is not None: + raise TypeError("tagged must be a tuple.") + + # {{{ ensure axes are properly tagged + vec = actx.tag_axis(0, DiscretizationElementAxisTag(), vec) + vec = tag_axes( + actx, + { i: TensorProductDOFAxisTag(i-1) for i in range(1, dim+1) }, + vec + ) + + operator = tag_axes( + actx, + { i: TensorProductOperatorAxisTag() for i in range(2) }, + operator + ) + + # }}} + + # {{{ einsum spec construction + + # 3D grad example spec using formula below: + # assume operator is a differentiation operator + # x-axis (axis = 0) contraction: ij,ejop->eiop + # y-axis (axis = 1) contraction: ij,eajp->eaip + # z-axis (axis = 2) contraction: ij,eabj->eabi operator_spec = 'ij' - data_spec = f'e{"abcdefghklm"[:axis]}j{"nopqrstuvwxyz"[:dim-axis-1]}' - out_spec = f'e{"abcdefghklm"[:axis]}i{"nopqrstuvwxyz"[:dim-axis-1]}' + data_spec = f'e{"abcdefghklmn"[:axis]}j{"opqrstuvwxyz"[:dim-axis-1]}' + out_spec = f'e{"abcdefghklmn"[:axis]}i{"opqrstuvwxyz"[:dim-axis-1]}' spec = operator_spec + ',' + data_spec + '->' + out_spec + # }}} + return tag_axes( actx, - { i : TensorProductDOFAxis() for i in range(1, dim+1) }, - actx.einsum(spec, operator, data, - arg_names=arg_names, - tagged=tags) + { i: TensorProductDOFAxisTag(i-1) for i in range(1, dim+1) }, + actx.einsum(spec, operator, vec, arg_names=arg_names, tagged=tagged) ) # }}} diff --git a/grudge/transform/metadata.py b/grudge/transform/metadata.py new file mode 100644 index 000000000..44318258a --- /dev/null +++ b/grudge/transform/metadata.py @@ -0,0 +1,73 @@ +__copyright__ = "Copyright (C) 2024 Addison Alvey-Blanco" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
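# ---------------------------------------------------------------------------
# Illustration (not part of the patch): the einsum spec construction from
# single_axis_operator_application above, checked for the 3D cases.  Note
# that for axis=1 the generated subscripts come out as "ij,eajo->eaio",
# which denotes the same contraction as the "ij,eajp->eaip" example in the
# in-code comment, just with a different free-index letter.
def single_axis_spec(dim: int, axis: int) -> str:
    operator_spec = "ij"
    data_spec = f'e{"abcdefghklmn"[:axis]}j{"opqrstuvwxyz"[:dim - axis - 1]}'
    out_spec = f'e{"abcdefghklmn"[:axis]}i{"opqrstuvwxyz"[:dim - axis - 1]}'
    return operator_spec + "," + data_spec + "->" + out_spec

assert single_axis_spec(3, 0) == "ij,ejop->eiop"   # x-axis contraction
assert single_axis_spec(3, 1) == "ij,eajo->eaio"   # y-axis contraction
assert single_axis_spec(3, 2) == "ij,eabj->eabi"   # z-axis contraction
# ---------------------------------------------------------------------------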
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +from pytools.tag import Tag, tag_dataclass +from meshmode.transform_metadata import DiscretizationDOFAxisTag +from pytato.transform.metadata import AxisIgnoredForPropagationTag + + +# {{{ tensor product specific metadata + +class OutputIsTensorProductDOFArrayOrdered(Tag): + # FIXME: REMOVE THIS + # /!\ THIS IS TEMPORARY AND WILL GO AWAY /!\ + """ + Signify that the strides will not be of order "C" or "F". + + Used to specify strides for eager einsums. + """ + pass + + +@tag_dataclass +class TensorProductDOFAxisTag(DiscretizationDOFAxisTag): + """ + Tag an axis as being an axis containing the DOFs of a tensor-product + discretization. Used to signify the relative update speed of an axis for + transformation (i.e. loop nest ordering) purposes. + """ + iaxis: int + + +class TensorProductOperatorAxisTag(DiscretizationDOFAxisTag, + AxisIgnoredForPropagationTag): + """ + Signify that an axis belongs to a 1D operator. No tags will be propagated + along an axis tagged with this tag. + """ + pass + + +class ReferenceTensorProductMassOperatorTag(Tag): + """ + Used in DAG transformation to realize algebraic simplification of 1D + inverse mass operator times mass operator. + """ + pass + + +class ReferenceTensorProductInverseMassOperatorTag(Tag): + """ + See MassMatrix1d. + """ + +# }}} From 85ad84c66c875e6278ce24fb4dd3e11c2bb797aa Mon Sep 17 00:00:00 2001 From: Addison Alvey-Blanco Date: Mon, 29 Apr 2024 14:24:24 -0500 Subject: [PATCH 5/6] Minor div update --- grudge/op.py | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/grudge/op.py b/grudge/op.py index 364e5bf9f..08af2ee24 100644 --- a/grudge/op.py +++ b/grudge/op.py @@ -74,11 +74,12 @@ from functools import partial -from meshmode.dof_array import DOFArray, warn +from meshmode.dof_array import DOFArray from meshmode.discretization.poly_element import ( TensorProductElementGroupBase as TensorProductElementGroup, SimplexElementGroupBase as SimplexElementGroup) -from meshmode.transform_metadata import (FirstAxisIsElementsTag, +from meshmode.transform_metadata import (DiscretizationTopologicalDimAxisTag, + FirstAxisIsElementsTag, DiscretizationDOFAxisTag, DiscretizationElementAxisTag, DiscretizationFaceAxisTag, @@ -441,10 +442,18 @@ def compute_tensor_product_div(actx, grp, diff_mat, vec, ijm): `_gradient_kernel.compute_tensor_product_grad` for more details. 
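# ---------------------------------------------------------------------------
# Minimal sketch (stand-in class, not the real pytools/meshmode tag
# hierarchy): how a dataclass-style axis tag such as TensorProductDOFAxisTag
# carries the axis index and compares by value, so two tags naming the same
# axis index are equal.
from dataclasses import dataclass


@dataclass(frozen=True)
class FakeTensorProductDOFAxisTag:
    iaxis: int


# tag DOF axes 1..dim of a folded array, as the kernels above do
tags = {i: FakeTensorProductDOFAxisTag(i - 1) for i in range(1, 4)}
assert tags[2] == FakeTensorProductDOFAxisTag(1)
# ---------------------------------------------------------------------------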
""" - vec = make_obj_array([ - fold(grp.space, vec[func_axis]) - for func_axis in range(vec.shape[0]) - ]) + vec = tag_axes( + actx, + {0: DiscretizationTopologicalDimAxisTag(), + 1: DiscretizationElementAxisTag()}, + fold(grp.space, vec) + ) + + vec = tag_axes( + actx, + { i: TensorProductDOFAxisTag(i-2) for i in range(2, grp.dim) }, + vec + ) if metric_in_matvec: stiff_1d, mass_1d = get_diff_mat(actx, grp, grp) @@ -499,7 +508,7 @@ def compute_tensor_product_div(actx, grp, diff_mat, vec, ijm): partials = partials.reshape(grp.dim, grp.dim, *partials.shape[-2:]) div = actx.einsum( - 'xrej,xrej->ej', + "xrej,xrej->ej", ijm, partials, arg_names=("inv_jac_t", "partials"), @@ -508,7 +517,6 @@ def compute_tensor_product_div(actx, grp, diff_mat, vec, ijm): return div - # }}} From cfcf1e1ee75d2b24ffd0efe6f72a28979d10f95d Mon Sep 17 00:00:00 2001 From: Addison Alvey-Blanco Date: Mon, 29 Apr 2024 14:50:04 -0500 Subject: [PATCH 6/6] New div --- grudge/op.py | 60 +++++++++++++++++++--------------------------------- 1 file changed, 22 insertions(+), 38 deletions(-) diff --git a/grudge/op.py b/grudge/op.py index 08af2ee24..1cac26a3c 100644 --- a/grudge/op.py +++ b/grudge/op.py @@ -455,65 +455,49 @@ def compute_tensor_product_div(actx, grp, diff_mat, vec, ijm): vec ) + div = 0 if metric_in_matvec: stiff_1d, mass_1d = get_diff_mat(actx, grp, grp) - partials = [] for func_axis in range(vec.shape[0]): - ref = [] - for xyz_axis in range(grp.dim): - ref.append(vec[func_axis]) + for rst_axis in range(grp.dim): + ref_deriv = vec[func_axis].copy() - apply_mass_axes = set(range(grp.dim)) - {xyz_axis} + apply_mass_axes = set(range(grp.dim)) - {rst_axis} for ax in apply_mass_axes: - ref[xyz_axis] = single_axis_operator_application( - actx, grp.dim, mass_1d, ax, ref[xyz_axis], + ref_deriv = single_axis_operator_application( + actx, grp.dim, mass_1d, ax, ref_deriv, tagged=(FirstAxisIsElementsTag(), OutputIsTensorProductDOFArrayOrdered(),), - arg_names=("mass_1d", f"vec_{func_axis}_{xyz_axis}") + arg_names=("mass_1d", f"vec_{func_axis}_{rst_axis}") ) - ref[xyz_axis] = single_axis_operator_application( - actx, grp.dim, stiff_1d, xyz_axis, ref[xyz_axis], + ref_deriv = unfold( + grp.space, + single_axis_operator_application( + actx, grp.dim, stiff_1d, rst_axis, ref_deriv, tagged=(FirstAxisIsElementsTag(), OutputIsTensorProductDOFArrayOrdered(),), - arg_names=("stiff_1d", f"vec_{func_axis}_{xyz_axis}") - ) + arg_names=("stiff_1d", f"vec_{func_axis}_{rst_axis}") + )) - partials.append(ref) + div += ref_deriv*ijm[func_axis,rst_axis] else: diff_mat = get_diff_mat(actx, grp, grp) - partials = [] for func_axis in range(vec.shape[0]): - ref = [] - for xyz_axis in range(grp.dim): - ref.append(vec[func_axis]) + for rst_axis in range(grp.dim): + ref_deriv = vec[func_axis].copy() - ref[xyz_axis] = single_axis_operator_application( - actx, grp.dim, diff_mat, xyz_axis, ref[xyz_axis], + ref_deriv = unfold( + grp.space, + single_axis_operator_application( + actx, grp.dim, diff_mat, rst_axis, ref_deriv, tagged=(FirstAxisIsElementsTag(), OutputIsTensorProductDOFArrayOrdered(),), - arg_names=("diff_mat", f"vec_{func_axis}_{xyz_axis}") - ) - - partials.append(ref) - - partials = actx.np.stack([ - unfold(grp.space, partials[func_axis][xyz_axis]) - for func_axis in range(grp.dim) - for xyz_axis in range(grp.dim) - ]) - partials = partials.reshape(grp.dim, grp.dim, *partials.shape[-2:]) - - div = actx.einsum( - "xrej,xrej->ej", - ijm, - partials, - arg_names=("inv_jac_t", "partials"), - tagged=(FirstAxisIsElementsTag(),) - ) + 
arg_names=("diff_mat", f"vec_{func_axis}_{rst_axis}") + )) return div