diff --git a/.github/workflows/build-mac.yml b/.github/workflows/build-mac.yml
index 51aa9f5104..f8530311df 100644
--- a/.github/workflows/build-mac.yml
+++ b/.github/workflows/build-mac.yml
@@ -50,6 +50,9 @@ jobs:
         run: |
           . ../firedrake_venv/bin/activate
           python -m pytest -v tests/firedrake/regression/ -k "poisson_strong or stokes_mini or dg_advection"
+          # also test the 'problem libraries' (spatialindex and libsupermesh)
+          python -m pytest -v tests/firedrake/regression/test_locate_cell.py
+          python -m pytest -v tests/firedrake/supermesh/test_assemble_mixed_mass_matrix.py
         timeout-minutes: 30
       - name: Post-run cleanup
         if: ${{ always() }}
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 909db6990a..0eb616c24d 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -66,6 +66,7 @@ jobs:
             --mpicxx="$MPICH_DIR"/mpicxx \
             --mpif90="$MPICH_DIR"/mpif90 \
             --mpiexec="$MPICH_DIR"/mpiexec \
+            --mpihome="$MPICH_DIR"/.. \
             --venv-name firedrake_venv \
             --no-package-manager \
             --disable-ssh \
diff --git a/.github/workflows/pip-mac.yml b/.github/workflows/pip-mac.yml
index c8d1fbfe2e..54e2ada7cc 100644
--- a/.github/workflows/pip-mac.yml
+++ b/.github/workflows/pip-mac.yml
@@ -68,25 +68,6 @@ jobs:
             --download-superlu_dist
           make

-      - name: Install libsupermesh
-        run: |
-          source pip_venv/bin/activate
-          python -m pip install 'rtree>=1.2'
-          cd pip_venv/src
-          git clone https://github.com/firedrakeproject/libsupermesh.git
-          mkdir -p libsupermesh/build
-          cd libsupermesh/build
-          cmake .. \
-            -DBUILD_SHARED_LIBS=ON \
-            -DCMAKE_INSTALL_PREFIX="$VIRTUAL_ENV" \
-            -DMPI_C_COMPILER=/opt/homebrew/bin/mpicc \
-            -DMPI_CXX_COMPILER=/opt/homebrew/bin/mpicxx \
-            -DMPI_Fortran_COMPILER=/opt/homebrew/bin/mpif90 \
-            -DCMAKE_Fortran_COMPILER=/opt/homebrew/bin/mpif90 \
-            -DMPIEXEC_EXECUTABLE=/opt/homebrew/bin/mpiexec
-          make
-          make install
-
       - uses: actions/checkout@v4
         with:
           path: pip_venv/src/firedrake
@@ -118,6 +99,9 @@ jobs:
           cd pip_venv/src/firedrake
           python -m pytest --timeout=1800 -v tests/firedrake/regression \
             -k "poisson_strong or stokes_mini or dg_advection"
+          # also test the 'problem libraries' (spatialindex and libsupermesh)
+          python -m pytest -v tests/firedrake/regression/test_locate_cell.py
+          python -m pytest -v tests/firedrake/supermesh/test_assemble_mixed_mass_matrix.py
         timeout-minutes: 30

       - name: Cleanup (post)
diff --git a/.github/workflows/pip.yml b/.github/workflows/pip.yml
index c358b70141..868c71cf9c 100644
--- a/.github/workflows/pip.yml
+++ b/.github/workflows/pip.yml
@@ -56,25 +56,6 @@ jobs:
         with:
           path: src/firedrake

-      - name: Install libsupermesh
-        run: |
-          source pip_venv/bin/activate
-          python -m pip install 'rtree>=1.2'
-          cd pip_venv/src
-          git clone https://github.com/firedrakeproject/libsupermesh.git
-          mkdir -p libsupermesh/build
-          cd libsupermesh/build
-          cmake .. \
-            -DBUILD_SHARED_LIBS=ON \
-            -DCMAKE_INSTALL_PREFIX="$VIRTUAL_ENV" \
-            -DMPI_C_COMPILER="$MPICH_DIR/mpicc" \
-            -DMPI_CXX_COMPILER="$MPICH_DIR/mpicxx" \
-            -DMPI_Fortran_COMPILER="$MPICH_DIR/mpif90" \
-            -DCMAKE_Fortran_COMPILER="$MPICH_DIR/mpif90" \
-            -DMPIEXEC_EXECUTABLE="$MPICH_DIR/mpiexec"
-          make
-          make install
-
       - name: Pip install
         run: |
           source pip_venv/bin/activate
diff --git a/.github/workflows/pyop2.yml b/.github/workflows/pyop2.yml
index b5e750242a..5104284d05 100644
--- a/.github/workflows/pyop2.yml
+++ b/.github/workflows/pyop2.yml
@@ -35,17 +35,6 @@ jobs:
         id: setup-python
         with:
           python-version: ${{ matrix.python-version }}
-          # By default setup-python pollutes the environment in such a way that virtual
-          # environments cannot be used. This prevents us from building libsupermesh because
-          # it relies on having rtree installed into a venv.
-          # https://github.com/actions/setup-python/issues/851
-          # https://github.com/actions/setup-python/blob/main/docs/advanced-usage.md#using-update-environment-flag
-          update-environment: false
-
-      - name: Create virtual environment
-        shell: bash
-        run: |
-          ${{ steps.setup-python.outputs.python-path }} -m venv venv

       - name: Clone PETSc
         uses: actions/checkout@v4
@@ -65,25 +54,6 @@ jobs:
             --with-fortran-bindings=0
           make

-      - name: Install libsupermesh
-        shell: bash
-        run: |
-          source venv/bin/activate
-          python -m pip install 'rtree>=1.2'
-          git clone https://github.com/firedrakeproject/libsupermesh.git
-          mkdir -p libsupermesh/build
-          cd libsupermesh/build
-          cmake .. \
-            -DBUILD_SHARED_LIBS=ON \
-            -DCMAKE_INSTALL_PREFIX="$VIRTUAL_ENV" \
-            -DMPI_C_COMPILER=mpicc \
-            -DMPI_CXX_COMPILER=mpicxx \
-            -DMPI_Fortran_COMPILER=mpif90 \
-            -DCMAKE_Fortran_COMPILER=mpif90 \
-            -DMPIEXEC_EXECUTABLE=mpiexec
-          make
-          make install
-
       - name: Checkout Firedrake
         uses: actions/checkout@v4
         with:
@@ -93,7 +63,6 @@ jobs:
         shell: bash
         working-directory: firedrake
         run: |
-          source ../venv/bin/activate
           python -m pip install -U pip
           python -m pip install -U pytest-timeout
@@ -101,7 +70,6 @@ jobs:
         shell: bash
         working-directory: firedrake
         run: |
-          source ../venv/bin/activate
           export CC=mpicc
           export HDF5_DIR="$PETSC_DIR/$PETSC_ARCH"
           export HDF5_MPI=ON
@@ -111,7 +79,6 @@ jobs:
         shell: bash
         working-directory: firedrake
         run: |
-          source ../venv/bin/activate
           pytest --tb=native --timeout=480 --timeout-method=thread -o faulthandler_timeout=540 -v tests/tsfc
         timeout-minutes: 10
@@ -119,7 +86,6 @@ jobs:
         shell: bash
         working-directory: firedrake
         run: |
-          source ../venv/bin/activate
           pytest -m "not parallel" --tb=native --timeout=480 --timeout-method=thread -o faulthandler_timeout=540 -v tests/pyop2
           mpiexec -n 2 --oversubscribe pytest -m "parallel[2]" --tb=native --timeout=480 --timeout-method=thread -o faulthandler_timeout=540 -v tests/pyop2
           mpiexec -n 3 --oversubscribe pytest -m "parallel[3]" --tb=native --timeout=480 --timeout-method=thread -o faulthandler_timeout=540 -v tests/pyop2
diff --git a/docker/Dockerfile.complex b/docker/Dockerfile.complex
index 1528f5c4ac..6aa73f280c 100644
--- a/docker/Dockerfile.complex
+++ b/docker/Dockerfile.complex
@@ -18,5 +18,6 @@ RUN bash -c "python3 firedrake-install \
     --mpicxx=$MPICH_DIR/mpicxx \
     --mpif90=$MPICH_DIR/mpif90 \
     --mpiexec=$MPICH_DIR/mpiexec \
+    --mpihome=$MPICH_DIR/.. \
     --slepc \
     --documentation-dependencies"
diff --git a/docker/Dockerfile.vanilla b/docker/Dockerfile.vanilla
index 2de26b1916..eee3ef53e3 100644
--- a/docker/Dockerfile.vanilla
+++ b/docker/Dockerfile.vanilla
@@ -16,4 +16,5 @@ RUN bash -c "python3 firedrake-install \
     --mpicc=$MPICH_DIR/mpicc \
     --mpicxx=$MPICH_DIR/mpicxx \
     --mpif90=$MPICH_DIR/mpif90 \
-    --mpiexec=$MPICH_DIR/mpiexec"
+    --mpiexec=$MPICH_DIR/mpiexec \
+    --mpihome=$MPICH_DIR/.."
diff --git a/docs/source/download.rst b/docs/source/download.rst
index 74645fc682..5f8f478864 100644
--- a/docs/source/download.rst
+++ b/docs/source/download.rst
@@ -241,7 +241,6 @@ Requirements
 * An activated virtual environment.
 * All the system requirements listed in :ref:`system-requirements`.
 * A Firedrake-compatible PETSc installation (using our `fork of PETSc `_). The set of flags passed to PETSc can be retrieved by passing the command ``--show-petsc-configure-options`` to ``firedrake-install``.
-* `libsupermesh `_ to be installed inside the virtual environment (see `here `_ for an example of how to do this).
 * The following environment variables to be set:

   * ``PETSC_DIR`` and ``PETSC_ARCH`` to point to the correct location for the PETSc installation.
diff --git a/docs/source/firedrake_usa_25.rst b/docs/source/firedrake_usa_25.rst
index e1d7a2b293..8cac5f4a9c 100644
--- a/docs/source/firedrake_usa_25.rst
+++ b/docs/source/firedrake_usa_25.rst
@@ -27,13 +27,13 @@ The conference will begin with a tutorial session on the morning of 28 February
 Conference venue
 ----------------

-The conference will take place in the Bill Daniel Student Center in Room 202 in the heart of the Baylor campus.
+The conference will take place in the Bill Daniel Student Center in Room 202 in the heart of the Baylor campus. A `campus map `__ is available online, and both Apple Maps and Google Maps accurately locate the Bill Daniel Student Center.

 Accommodation
 -------------

-We will be reserving a room block in a hotel near campus. More information to follow.
+We have reserved a block of hotel rooms at SpringHill Suites Waco, about a 15-minute walk from the Bill Daniel Student Center. Follow `this link `__ to hold your room. The group rate is only available until 20 January 2025.

 Conference dinner
@@ -53,9 +53,9 @@ The registration fees are as follows:
    :widths: 25 50
    :header-rows: 0

-   * - Student
+   * - Student:
      - $50
-   * - Non-student
+   * - Non-student:
      - $200

 The `SIAM Texas-Louisiana Section `__ is providing some support for students currently attending universities in Texas or Louisiana to attend.
@@ -66,7 +66,7 @@ Conference registration is coming soon.
 Abstract submission
 -------------------

-Abstract submission will open soon via Easy Chair.
+Abstracts can be submitted `via EasyChair `__.

@@ -79,17 +79,11 @@ The conference has been kindly supported by the SIAM TX-LA Section and EPSRC.
 Travel to Waco
 --------------

-* By air
+* By air: Waco has a small airport. There is daily service between Waco and Dallas/Fort Worth International Airport via American Airlines.
-
-  Waco has a small airport. There is daily service between Waco and Dallas/Fort Worth International Airport via American Airlines.

+* By ground: We are less than two hours by car from the Dallas and Austin airports, and just under three hours from Bush Intercontinental Airport in Houston. Additionally, Waco is reachable by bus services such as Greyhound and FlixBus.
-* By ground
-
-  We are less than two hours by car from the Dallas and Austin airports, and just under three hours from Bush Intercontinental Airport in Houston. Additionally, Waco is reachable by bus services such as Greyhound and FlixBus.
-
-* Parking on campus
-
-  Baylor has plenty of visitor parking for your personal or rental vehicle, but conslut `these instructions __` and make sure to `register your vehicle __`.
+* Parking on campus: Baylor has plenty of visitor parking for your personal or rental vehicle, but consult `these instructions `__ and make sure to `register your vehicle `__.
diff --git a/docs/source/parallelism.rst b/docs/source/parallelism.rst
index 70fc119e98..d71b3d9305 100644
--- a/docs/source/parallelism.rst
+++ b/docs/source/parallelism.rst
@@ -45,11 +45,12 @@ firedrake installer to use it, by running:

 .. code-block:: shell

-   python3 firedrake-install --mpiexec=mpiexec --mpicc=mpicc --mpicxx=mpicxx --mpif90=mpif90
+   python3 firedrake-install --mpiexec=mpiexec --mpicc=mpicc --mpicxx=mpicxx --mpif90=mpif90 --mpihome=mpihome

 where ``mpiexec``, ``mpicc``, ``mpicxx``, and ``mpif90`` are the
 commands to run an MPI job and to compile C, C++, and Fortran 90 code,
-respectively.
+respectively. ``mpihome`` is an additional option that must point to the
+root directory of the MPI installation (e.g. ``/usr`` or ``/opt/mpich``).

 Printing in parallel
 ====================
diff --git a/firedrake/cython/dmcommon.pyx b/firedrake/cython/dmcommon.pyx
index 75f980d7ba..b5e0777dfe 100644
--- a/firedrake/cython/dmcommon.pyx
+++ b/firedrake/cython/dmcommon.pyx
@@ -3358,7 +3358,7 @@ def make_global_numbering(PETSc.Section lsec, PETSc.Section gsec):
     cdef:
         PetscInt c, cc, p, pStart, pEnd, dof, cdof, loff, goff
         np.ndarray val
-        PetscInt *dof_array = NULL
+        const PetscInt *dof_array = NULL

     val = np.empty(lsec.getStorageSize(), dtype=IntType)
     pStart, pEnd = lsec.getChart()
diff --git a/firedrake/interpolation.py b/firedrake/interpolation.py
index 8a20e3da73..f27024ff28 100644
--- a/firedrake/interpolation.py
+++ b/firedrake/interpolation.py
@@ -1092,8 +1092,7 @@ def _interpolator(V, tensor, expr, subset, arguments, access, bcs=None):
     # interpolation) we have to pass the finat element we construct
     # here. Ideally we would only pass the UFL element through.
     kernel = compile_expression(cell_set.comm, expr, to_element, V.ufl_element(),
-                                domain=source_mesh, parameters=parameters,
-                                log=PETSc.Log.isActive())
+                                domain=source_mesh, parameters=parameters)
     ast = kernel.ast
     oriented = kernel.oriented
     needs_cell_sizes = kernel.needs_cell_sizes
@@ -1221,10 +1220,9 @@
                        f"firedrake-tsfc-expression-kernel-cache-uid{os.getuid()}")

-def _compile_expression_key(comm, expr, to_element, ufl_element, domain, parameters, log):
+def _compile_expression_key(comm, expr, to_element, ufl_element, domain, parameters):
     """Generate a cache key suitable for :func:`tsfc.compile_expression_dual_evaluation`."""
-    key = hash_expr(expr), hash(ufl_element), utils.tuplify(parameters), log
-    return key
+    return (hash_expr(expr), hash(ufl_element), utils.tuplify(parameters))

 @memory_and_disk_cache(
diff --git a/firedrake/parloops.py b/firedrake/parloops.py
index 08cb57e3d8..0a33cd4ae5 100644
--- a/firedrake/parloops.py
+++ b/firedrake/parloops.py
@@ -171,7 +171,7 @@ def par_loop(kernel, measure, args, kernel_kwargs=None, **kwargs):
         domain = '{[i]: 0 <= i < A.dofs}'
         instructions = '''
         for i
-            A[i] = max(A[i], B[0])
+            A[i] = fmax(A[i], B[0])
         end
         '''
         par_loop((domain, instructions), dx, {'A' : (A, RW), 'B': (B, READ)})
diff --git a/firedrake/preconditioners/pmg.py b/firedrake/preconditioners/pmg.py
index 8726db6c9b..48df37d615 100644
--- a/firedrake/preconditioners/pmg.py
+++ b/firedrake/preconditioners/pmg.py
@@ -530,7 +530,7 @@ def coarsen_bc_value(self, bc, cV):

 def prolongation_transfer_kernel_action(Vf, expr):
     to_element = create_element(Vf.ufl_element())
-    kernel = compile_expression_dual_evaluation(expr, to_element, Vf.ufl_element(), log=PETSc.Log.isActive())
+    kernel = compile_expression_dual_evaluation(expr, to_element, Vf.ufl_element())
     coefficients = extract_numbered_coefficients(expr, kernel.coefficient_numbers)
     if kernel.needs_external_coords:
         coefficients = [Vf.mesh().coordinates] + coefficients
diff --git a/firedrake/slate/slac/compiler.py b/firedrake/slate/slac/compiler.py
index 7e1b14281c..d9e7bced02 100644
--- a/firedrake/slate/slac/compiler.py
+++ b/firedrake/slate/slac/compiler.py
@@ -237,4 +237,4 @@ def gem_to_loopy(gem_expr, var2terminal, scalar_type):
     # Part B: impero_c to loopy
     output_arg = OutputKernelArg(output_loopy_arg)

-    return generate_loopy(impero_c, args, scalar_type, "slate_loopy", [], log=PETSc.Log.isActive()), output_arg
+    return generate_loopy(impero_c, args, scalar_type, "slate_loopy", []), output_arg
diff --git a/firedrake/supermeshing.py b/firedrake/supermeshing.py
index a1ce2cde17..ee576ea6e5 100644
--- a/firedrake/supermeshing.py
+++ b/firedrake/supermeshing.py
@@ -1,7 +1,8 @@
 # Code for projections and other fun stuff involving supermeshes.
 import firedrake
 import ctypes
-import sys
+import pathlib
+import libsupermesh
 from firedrake.cython.supermeshimpl import assemble_mixed_mass_matrix as ammm, intersection_finder
 from firedrake.mg.utils import get_level
 from firedrake.petsc import PETSc
@@ -428,7 +429,8 @@ def likely(cell_A):
         "complex_mode": 1 if complex_mode else 0
     }

-    dirs = get_petsc_dir() + (sys.prefix, )
+    libsupermesh_dir = pathlib.Path(libsupermesh.get_include()).parent.absolute()
+    dirs = get_petsc_dir() + (libsupermesh_dir,)
     includes = ["-I%s/include" % d for d in dirs]
     libs = ["-L%s/lib" % d for d in dirs]
     libs = libs + ["-Wl,-rpath,%s/lib" % d for d in dirs] + ["-lpetsc", "-lsupermesh"]
diff --git a/firedrake/tsfc_interface.py b/firedrake/tsfc_interface.py
index a4a57ae0cb..ba10d79507 100644
--- a/firedrake/tsfc_interface.py
+++ b/firedrake/tsfc_interface.py
@@ -53,8 +53,8 @@
 )

-def tsfc_compile_form_hashkey(form, prefix, parameters, interface, diagonal, log):
-    # Drop prefix as it's only used for naming and log
+def tsfc_compile_form_hashkey(form, prefix, parameters, interface, diagonal):
+    # Drop prefix as it's only used for naming
     return default_parallel_hashkey(form.signature(), prefix, parameters, interface, diagonal)
@@ -94,7 +94,7 @@ def __init__(
         """
         tree = tsfc_compile_form(form, prefix=name, parameters=parameters,
                                  interface=interface,
-                                 diagonal=diagonal, log=PETSc.Log.isActive())
+                                 diagonal=diagonal)
         kernels = []
         for kernel in tree:
             # Individual kernels do not have to use all of the coefficients
diff --git a/firedrake/utility_meshes.py b/firedrake/utility_meshes.py
index 4f2a0f34f1..99d687449e 100644
--- a/firedrake/utility_meshes.py
+++ b/firedrake/utility_meshes.py
@@ -24,6 +24,7 @@
     as_tensor,
     dot,
     And,
+    Or,
     sin,
     cos,
     real
@@ -1800,6 +1801,8 @@ def PeriodicBoxMesh(
     Lx,
     Ly,
     Lz,
+    directions=(True, True, True),
+    hexahedral=False,
     reorder=None,
     distribution_parameters=None,
     comm=COMM_WORLD,
@@ -1809,105 +1812,185 @@ def PeriodicBoxMesh(
 ):
     """Generate a periodic mesh of a 3D box.

-    :arg nx: The number of cells in the x direction
-    :arg ny: The number of cells in the y direction
-    :arg nz: The number of cells in the z direction
-    :arg Lx: The extent in the x direction
-    :arg Ly: The extent in the y direction
-    :arg Lz: The extent in the z direction
-    :kwarg reorder: (optional), should the mesh be reordered?
-    :kwarg distribution_parameters: options controlling mesh
-        distribution, see :func:`.Mesh` for details.
-    :kwarg comm: Optional communicator to build the mesh on.
-    :kwarg name: Optional name of the mesh.
-    :kwarg distribution_name: the name of parallel distribution used
-        when checkpointing; if `None`, the name is automatically
-        generated.
-    :kwarg permutation_name: the name of entity permutation (reordering) used
-        when checkpointing; if `None`, the name is automatically
-        generated.
+    Parameters
+    ----------
+    nx : int
+        Number of cells in the x direction.
+    ny : int
+        Number of cells in the y direction.
+    nz : int
+        Number of cells in the z direction.
+    Lx : float
+        Extent in the x direction.
+    Ly : float
+        Extent in the y direction.
+    Lz : float
+        Extent in the z direction.
+    directions : list or tuple
+        Directions of periodicity (one ``bool`` per coordinate direction).
+    hexahedral : bool
+        Whether to make a hexahedral mesh.
+    reorder : bool or None
+        Whether to reorder the mesh.
+    distribution_parameters : dict or None
+        Options controlling mesh distribution, see :func:`.Mesh` for details.
+    comm :
+        Communicator to build the mesh on.
+    name : str
+        Name of the mesh.
+    distribution_name : str or None
+        Name of parallel distribution used when checkpointing;
+        if `None`, the name is automatically generated.
+    permutation_name : str or None
+        Name of entity permutation (reordering) used when checkpointing;
+        if `None`, the name is automatically generated.
+
+    Returns
+    -------
+    MeshGeometry
+        The mesh.
+
+    Notes
+    -----
+
+    The boundary surfaces are numbered as follows:
+
+    * 1: plane x == 0
+    * 2: plane x == Lx
+    * 3: plane y == 0
+    * 4: plane y == Ly
+    * 5: plane z == 0
+    * 6: plane z == Lz
+
+    where periodic surfaces are regarded as interior, for which the dS integral is to be used.
+    """
     for n in (nx, ny, nz):
         if n < 3:
             raise ValueError(
                 "3D periodic meshes with fewer than 3 cells are not currently supported"
             )
+    if hexahedral:
+        if len(directions) != 3:
+            raise ValueError(f"directions must have exactly dim (=3) elements: Got {directions}")
+        plex = PETSc.DMPlex().createBoxMesh(
+            (nx, ny, nz),
+            lower=(0., 0., 0.),
+            upper=(Lx, Ly, Lz),
+            simplex=False,
+            periodic=directions,
+            interpolate=True,
+            sparseLocalize=False,
+            comm=comm,
+        )
+        m = mesh.Mesh(
+            plex,
+            reorder=reorder,
+            distribution_parameters=distribution_parameters,
+            name=name,
+            distribution_name=distribution_name,
+            permutation_name=permutation_name,
+            comm=comm)
+        x, y, z = SpatialCoordinate(m)
+        V = FunctionSpace(m, "Q", 2)
+        eps = min([Lx / nx, Ly / ny, Lz / nz]) / 1000.
+        if directions[0]:  # x
+            fx0 = Function(V).interpolate(conditional(Or(x < eps, x > Lx - eps), 1., 0.))
+            fx1 = fx0
+        else:
+            fx0 = Function(V).interpolate(conditional(x < eps, 1., 0.))
+            fx1 = Function(V).interpolate(conditional(x > Lx - eps, 1., 0.))
+        if directions[1]:  # y
+            fy0 = Function(V).interpolate(conditional(Or(y < eps, y > Ly - eps), 1., 0.))
+            fy1 = fy0
+        else:
+            fy0 = Function(V).interpolate(conditional(y < eps, 1., 0.))
+            fy1 = Function(V).interpolate(conditional(y > Ly - eps, 1., 0.))
+        if directions[2]:  # z
+            fz0 = Function(V).interpolate(conditional(Or(z < eps, z > Lz - eps), 1., 0.))
+            fz1 = fz0
+        else:
+            fz0 = Function(V).interpolate(conditional(z < eps, 1., 0.))
+            fz1 = Function(V).interpolate(conditional(z > Lz - eps, 1., 0.))
+        return mesh.RelabeledMesh(m, [fx0, fx1, fy0, fy1, fz0, fz1], [1, 2, 3, 4, 5, 6], name=name)
+    else:
+        if tuple(directions) != (True, True, True):
+            raise NotImplementedError("Can only specify directions with hexahedral = True")
+        xcoords = np.arange(0.0, Lx, Lx / nx, dtype=np.double)
+        ycoords = np.arange(0.0, Ly, Ly / ny, dtype=np.double)
+        zcoords = np.arange(0.0, Lz, Lz / nz, dtype=np.double)
+        coords = (
+            np.asarray(np.meshgrid(xcoords, ycoords, zcoords)).swapaxes(0, 3).reshape(-1, 3)
+        )
+        i, j, k = np.meshgrid(
+            np.arange(nx, dtype=np.int32),
+            np.arange(ny, dtype=np.int32),
+            np.arange(nz, dtype=np.int32),
+        )
+        v0 = k * nx * ny + j * nx + i
+        v1 = k * nx * ny + j * nx + (i + 1) % nx
+        v2 = k * nx * ny + ((j + 1) % ny) * nx + i
+        v3 = k * nx * ny + ((j + 1) % ny) * nx + (i + 1) % nx
+        v4 = ((k + 1) % nz) * nx * ny + j * nx + i
+        v5 = ((k + 1) % nz) * nx * ny + j * nx + (i + 1) % nx
+        v6 = ((k + 1) % nz) * nx * ny + ((j + 1) % ny) * nx + i
+        v7 = ((k + 1) % nz) * nx * ny + ((j + 1) % ny) * nx + (i + 1) % nx
-    xcoords = np.arange(0.0, Lx, Lx / nx, dtype=np.double)
-    ycoords = np.arange(0.0, Ly, Ly / ny, dtype=np.double)
-    zcoords = np.arange(0.0, Lz, Lz / nz, dtype=np.double)
-    coords = (
-        np.asarray(np.meshgrid(xcoords, ycoords, zcoords)).swapaxes(0, 3).reshape(-1, 3)
-    )
-    i, j, k = np.meshgrid(
-        np.arange(nx, dtype=np.int32),
-        np.arange(ny, dtype=np.int32),
-        np.arange(nz, dtype=np.int32),
-    )
-    v0 = k * nx * ny + j * nx + i
-    v1 = k * nx * ny + j * nx + (i + 1) % nx
-    v2 = k * nx * ny + ((j + 1) % ny) * nx + i
-    v3 = k * nx * ny + ((j + 1) % ny) * nx + (i + 1) % nx
-    v4 = ((k + 1) % nz) * nx * ny + j * nx + i
-    v5 = ((k + 1) % nz) * nx * ny + j * nx + (i + 1) % nx
-    v6 = ((k + 1) % nz) * nx * ny + ((j + 1) % ny) * nx + i
-    v7 = ((k + 1) % nz) * nx * ny + ((j + 1) % ny) * nx + (i + 1) % nx
-
-    cells = [
-        [v0, v1, v3, v7],
-        [v0, v1, v7, v5],
-        [v0, v5, v7, v4],
-        [v0, v3, v2, v7],
-        [v0, v6, v4, v7],
-        [v0, v2, v6, v7],
-    ]
-    cells = np.asarray(cells).reshape(-1, ny, nx, nz).swapaxes(0, 3).reshape(-1, 4)
-    plex = mesh.plex_from_cell_list(
-        3, cells, coords, comm, mesh._generate_default_mesh_topology_name(name)
-    )
-    m = mesh.Mesh(
-        plex,
-        reorder=reorder_noop,
-        distribution_parameters=distribution_parameters_no_overlap,
-        name=name,
-        distribution_name=distribution_name,
-        permutation_name=permutation_name,
-        comm=comm,
-    )
+        cells = [
+            [v0, v1, v3, v7],
+            [v0, v1, v7, v5],
+            [v0, v5, v7, v4],
+            [v0, v3, v2, v7],
+            [v0, v6, v4, v7],
+            [v0, v2, v6, v7],
+        ]
+        cells = np.asarray(cells).reshape(-1, ny, nx, nz).swapaxes(0, 3).reshape(-1, 4)
+        plex = mesh.plex_from_cell_list(
+            3, cells, coords, comm, mesh._generate_default_mesh_topology_name(name)
+        )
+        m = mesh.Mesh(
+            plex,
+            reorder=reorder_noop,
+            distribution_parameters=distribution_parameters_no_overlap,
+            name=name,
+            distribution_name=distribution_name,
+            permutation_name=permutation_name,
+            comm=comm,
+        )
-    new_coordinates = Function(
-        VectorFunctionSpace(
-            m, FiniteElement("DG", tetrahedron, 1, variant="equispaced")
-        ),
-        name=mesh._generate_default_mesh_coordinates_name(name),
-    )
-    new_coordinates.interpolate(m.coordinates)
+        new_coordinates = Function(
+            VectorFunctionSpace(
+                m, FiniteElement("DG", tetrahedron, 1, variant="equispaced")
+            ),
+            name=mesh._generate_default_mesh_coordinates_name(name),
+        )
+        new_coordinates.interpolate(m.coordinates)
-    coords_by_cell = new_coordinates.dat.data.reshape((-1, 4, 3)).transpose(1, 0, 2)
+        coords_by_cell = new_coordinates.dat.data.reshape((-1, 4, 3)).transpose(1, 0, 2)
-    # ensure we really got a view:
-    assert coords_by_cell.base is new_coordinates.dat.data.base
+        # ensure we really got a view:
+        assert coords_by_cell.base is new_coordinates.dat.data.base
-    # Find the cells that are too big in each direction because they are
-    # wrapped.
-    cell_is_wrapped = (
-        (coords_by_cell.max(axis=0) - coords_by_cell.min(axis=0))
-        / (Lx/nx, Ly/ny, Lz/nz) > 1.1
-    )
+        # Find the cells that are too big in each direction because they are
+        # wrapped.
+        cell_is_wrapped = (
+            (coords_by_cell.max(axis=0) - coords_by_cell.min(axis=0))
+            / (Lx/nx, Ly/ny, Lz/nz) > 1.1
+        )
-    # Move wrapped coordinates to the other end of the domain.
-    for i, extent in enumerate((Lx, Ly, Lz)):
-        coords = coords_by_cell[:, :, i]
-        coords[np.logical_and(cell_is_wrapped[:, i], np.isclose(coords, 0))] \
-            = extent
+        # Move wrapped coordinates to the other end of the domain.
+        for i, extent in enumerate((Lx, Ly, Lz)):
+            coords = coords_by_cell[:, :, i]
+            coords[np.logical_and(cell_is_wrapped[:, i], np.isclose(coords, 0))] \
+                = extent
-    return _postprocess_periodic_mesh(new_coordinates,
-                                      comm,
-                                      distribution_parameters,
-                                      reorder,
-                                      name,
-                                      distribution_name,
-                                      permutation_name)
+        return _postprocess_periodic_mesh(new_coordinates,
+                                          comm,
+                                          distribution_parameters,
+                                          reorder,
+                                          name,
+                                          distribution_name,
+                                          permutation_name)

 @PETSc.Log.EventDecorator()
@@ -1915,6 +1998,8 @@ def PeriodicUnitCubeMesh(
     nx,
     ny,
     nz,
+    directions=(True, True, True),
+    hexahedral=False,
     reorder=None,
     distribution_parameters=None,
     comm=COMM_WORLD,
@@ -1924,20 +2009,52 @@ def PeriodicUnitCubeMesh(
 ):
     """Generate a periodic mesh of a unit cube

-    :arg nx: The number of cells in the x direction
-    :arg ny: The number of cells in the y direction
-    :arg nz: The number of cells in the z direction
-    :kwarg reorder: (optional), should the mesh be reordered?
-    :kwarg distribution_parameters: options controlling mesh
-        distribution, see :func:`.Mesh` for details.
-    :kwarg comm: Optional communicator to build the mesh on.
-    :kwarg name: Optional name of the mesh.
-    :kwarg distribution_name: the name of parallel distribution used
-        when checkpointing; if `None`, the name is automatically
-        generated.
-    :kwarg permutation_name: the name of entity permutation (reordering) used
-        when checkpointing; if `None`, the name is automatically
-        generated.
+    Parameters
+    ----------
+    nx : int
+        Number of cells in the x direction.
+    ny : int
+        Number of cells in the y direction.
+    nz : int
+        Number of cells in the z direction.
+    directions : list or tuple
+        Directions of periodicity (one ``bool`` per coordinate direction).
+    hexahedral : bool
+        Whether to make a hexahedral mesh.
+    reorder : bool or None
+        Whether to reorder the mesh.
+    distribution_parameters : dict or None
+        Options controlling mesh distribution, see :func:`.Mesh` for details.
+    comm :
+        Communicator to build the mesh on.
+    name : str
+        Name of the mesh.
+    distribution_name : str or None
+        Name of parallel distribution used when checkpointing;
+        if `None`, the name is automatically generated.
+    permutation_name : str or None
+        Name of entity permutation (reordering) used when checkpointing;
+        if `None`, the name is automatically generated.
+
+    Returns
+    -------
+    MeshGeometry
+        The mesh.
+
+    Notes
+    -----
+
+    The boundary surfaces are numbered as follows:
+
+    * 1: plane x == 0
+    * 2: plane x == 1
+    * 3: plane y == 0
+    * 4: plane y == 1
+    * 5: plane z == 0
+    * 6: plane z == 1
+
+    where periodic surfaces are regarded as interior, for which the dS integral is to be used.
+    """
     return PeriodicBoxMesh(
         nx,
@@ -1946,6 +2063,8 @@
         1.0,
         1.0,
         1.0,
+        directions=directions,
+        hexahedral=hexahedral,
         reorder=reorder,
         distribution_parameters=distribution_parameters,
         comm=comm,
diff --git a/pyproject.toml b/pyproject.toml
index fe15d8e7c1..bb934e4182 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -38,6 +38,7 @@ dependencies = [
   "fenics-fiat @ git+https://github.com/firedrakeproject/fiat.git",
   "pyadjoint-ad @ git+https://github.com/dolfin-adjoint/pyadjoint.git",
   "loopy @ git+https://github.com/firedrakeproject/loopy.git@main",
+  "libsupermesh @ git+https://github.com/firedrakeproject/libsupermesh.git",
 ]
 classifiers = [
   "Development Status :: 5 - Production/Stable",
@@ -98,6 +99,7 @@ requires = [
   "mpi4py; python_version < '3.13'",
   "petsc4py",
   "rtree>=1.2",
+  "libsupermesh @ git+https://github.com/firedrakeproject/libsupermesh.git",
 ]
 build-backend = "setuptools.build_meta"
diff --git a/requirements-git.txt b/requirements-git.txt
index 93e6df0f47..b616f15bbc 100644
--- a/requirements-git.txt
+++ b/requirements-git.txt
@@ -4,3 +4,4 @@
 git+https://github.com/dolfin-adjoint/pyadjoint.git#egg=pyadjoint-ad
 git+https://github.com/firedrakeproject/loopy.git@main#egg=loopy
 git+https://github.com/firedrakeproject/pytest-mpi.git@main#egg=pytest-mpi
 git+https://github.com/firedrakeproject/petsc.git@firedrake#egg=petsc
+git+https://github.com/firedrakeproject/libsupermesh.git#egg=libsupermesh
diff --git a/scripts/firedrake-install b/scripts/firedrake-install
index f4406a6ffb..fa39e2b3b0 100755
--- a/scripts/firedrake-install
+++ b/scripts/firedrake-install
@@ -76,7 +76,7 @@ class FiredrakeConfiguration(dict):
                 self["options"][o] = args.__dict__[o]

     _persistent_options = ["package_manager",
-                           "minimal_petsc", "mpicc", "mpicxx", "mpif90", "mpiexec", "disable_ssh",
+                           "minimal_petsc", "mpicc", "mpicxx", "mpif90", "mpiexec", "mpihome", "disable_ssh",
                            "honour_petsc_dir", "with_parmetis",
                            "slepc", "packages", "honour_pythonpath",
                            "opencascade", "torch", "jax",
@@ -306,6 +306,9 @@ honoured.""",
     parser.add_argument("--mpiexec", type=str,
                         action="store", default=None,
                         help="MPI launcher. If not set, MPICH will be downloaded and used.")
+    parser.add_argument("--mpihome", type=str,
+                        action="store", default=None,
+                        help="Location of MPI files. If not set, MPICH will be downloaded and used.")
     parser.add_argument("--mpi4py-version",
                         help="Specify an exact version of mpi4py to install")
     parser.add_argument("--show-petsc-configure-options", action="store_true",
                         help="Print out the configure options passed to PETSc and exit")
@@ -339,9 +342,9 @@ honoured.""",
     args = parser.parse_args()

     # If the user has set any MPI info, they must set them all
-    if args.mpicc or args.mpicxx or args.mpif90 or args.mpiexec:
-        if not (args.mpicc and args.mpicxx and args.mpif90 and args.mpiexec):
-            log.error("If you set any MPI information, you must set all of {mpicc, mpicxx, mpif90, mpiexec}.")
+    if args.mpicc or args.mpicxx or args.mpif90 or args.mpiexec or args.mpihome:
+        if not (args.mpicc and args.mpicxx and args.mpif90 and args.mpiexec and args.mpihome):
+            log.error("If you set any MPI information, you must set all of {mpicc, mpicxx, mpif90, mpiexec, mpihome}.")
             sys.exit(1)

     if args.package_branch:
@@ -1252,36 +1255,6 @@ def build_and_install_h5py():
         log.info("No need to rebuild h5py")

-def build_and_install_libsupermesh(cc, cxx, f90, mpiexec):
-    log.info("Installing libsupermesh")
-    url = "git+https://github.com/firedrakeproject/libsupermesh.git"
-    if os.path.exists("libsupermesh"):
-        changed = git_update("libsupermesh", url)
-    else:
-        git_clone(url)
-        changed = True
-    if changed:
-        with directory("libsupermesh"):
-            check_call(["git", "reset", "--hard"])
-            check_call(["git", "clean", "-f", "-x", "-d"])
-            check_call(["mkdir", "-p", "build"])
-            with directory("build"):
-                cmd = [
-                    "cmake", "..", "-DBUILD_SHARED_LIBS=ON",
-                    "-DCMAKE_INSTALL_PREFIX=" + firedrake_env,
-                    "-DMPI_C_COMPILER=" + cc,
-                    "-DMPI_CXX_COMPILER=" + cxx,
-                    "-DMPI_Fortran_COMPILER=" + f90,
-                    "-DCMAKE_Fortran_COMPILER=" + f90,
-                    "-DMPIEXEC_EXECUTABLE=" + mpiexec,
-                ]
-                check_call(cmd)
-                check_call(["make"])
-                check_call(["make", "install"])
-    else:
-        log.info("No need to rebuild libsupermesh")
-
-
 def build_and_install_pythonocc():
     log.info("Installing pythonocc-core")
     url = "git+https://github.com/tpaviot/pythonocc-core.git@595b0a4e8e60e8d6011bea0cdb54ac878efcfcd2"
@@ -1420,7 +1393,7 @@
 if args.rebuild_script:
     sys.exit(0)

-def create_compiler_env(cc, cxx, f90):
+def create_compiler_env(cc, cxx, f90, mpihome):
     env = dict()
     if cc:
         env["MPICC"] = cc
@@ -1439,8 +1412,10 @@
         env["CXX"] = cxx
     if f90:
         env["MPIF90"] = f90
-        env["MPI_C_COMPILER"] = f90
+        env["MPI_Fortran_COMPILER"] = f90
         env["F90"] = f90
+    if mpihome:
+        env["MPI_HOME"] = mpihome
     return env
@@ -1688,6 +1663,8 @@
 run_pip(["install", "-U", "hatch"])
 run_pip(["install", "-U", "editables"])
 run_pip(["install", "-U", "pip"])
 run_pip(["install", "-U", "wheel"])
+run_pip(["install", "-U", "scikit-build-core"])
+run_pip(["install", "-U", "rtree>=1.2"])

 # Extra numpy dependendencies, see
 # https://github.com/numpy/numpy/blob/main/pyproject.toml
@@ -1733,7 +1710,18 @@
 cc = options["mpicc"]
 cxx = options["mpicxx"]
 f90 = options["mpif90"]
 mpiexec = options["mpiexec"]
-compiler_env = create_compiler_env(cc, cxx, f90)
+try:
+    mpihome = options["mpihome"]
+except KeyError:
+    if cc:
+        try:
+            mpihome = os.environ["MPI_HOME"]
+        except KeyError:
+            raise InstallError("MPI compilers specified but mpihome is missing from "
+                               "configuration. Please set MPI_HOME to continue.")
+    else:
+        mpihome = None
+compiler_env = create_compiler_env(cc, cxx, f90, mpihome)

 os.chdir(firedrake_env)
 # Dict to store BLAS and OPENBLAS environment variables
@@ -1830,7 +1818,8 @@ if mode == "install":
         cxx = os.path.join(compilerbin, "mpicxx")
         f90 = os.path.join(compilerbin, "mpif90")
         mpiexec = os.path.join(compilerbin, "mpiexec")
-        compiler_env = create_compiler_env(cc, cxx, f90)
+        mpihome = os.path.join(compilerbin, "..")
+        compiler_env = create_compiler_env(cc, cxx, f90, mpihome)

         # Make sure that we link against the right MPI and PETSc shared libraries
         link_env = {
@@ -1877,9 +1866,6 @@ if mode == "install":
             install(p+"/")
         sys.path.append(os.getcwd() + "/" + p)

-    with environment(**compiler_env):
-        build_and_install_libsupermesh(cc, cxx, f90, mpiexec)
-
     with pipargs("--no-deps"), environment(**compiler_env, **link_env):
         install("firedrake/")
@@ -1986,7 +1972,8 @@ else:
     cxx = os.path.join(compilerbin, "mpicxx")
     f90 = os.path.join(compilerbin, "mpif90")
     mpiexec = os.path.join(compilerbin, "mpiexec")
-    compiler_env = create_compiler_env(cc, cxx, f90)
+    mpihome = os.path.join(compilerbin, "..")
+    compiler_env = create_compiler_env(cc, cxx, f90, mpihome)

     # Make sure that we link against the right MPI and PETSc shared libraries
     link_env = {
@@ -2059,9 +2046,6 @@ Please consider updating your PETSc manually.
     else:
         install(p+"/")

-    with environment(**compiler_env):
-        build_and_install_libsupermesh(cc, cxx, f90, mpiexec)
-
     with pipargs("--no-deps"), environment(**compiler_env, **link_env):
         install("firedrake/")
diff --git a/setup.py b/setup.py
index cf970c1712..52112e1500 100644
--- a/setup.py
+++ b/setup.py
@@ -6,6 +6,7 @@
 import pybind11
 import petsc4py
 import rtree
+import libsupermesh
 import pkgconfig
 from dataclasses import dataclass, field
 from setuptools import setup, find_packages, Extension
@@ -145,6 +146,9 @@ def __getitem__(self, key):
 # In the next 2 linkages we are using `site.getsitepackages()[0]`, which isn't
 # guaranteed to be the correct place we could also use "$ORIGIN/../../lib_dir",
 # but that definitely doesn't work with editable installs.
+# This is necessary because Python build isolation means that the compile-time
+# library dirs (in the isolated build env) are different to the run-time
+# library dirs (in the venv).
 # libspatialindex
 # example:
@@ -159,15 +163,15 @@ def __getitem__(self, key):
 # libsupermesh
 # example:
-# gcc -I/supermesh/include
-# gcc /supermesh/supermesh.cpython-311-x86_64-linux-gnu.so \
-#     -lsupermesh \
-#     -Wl,-rpath,$ORIGIN/../../supermesh
-supermesh_ = ExternalDependency(
-    include_dirs=[f"{sys.prefix}/include"],
-    library_dirs=[f"{sys.prefix}/lib"],
+# gcc -Ipath/to/libsupermesh/include
+# gcc path/to/libsupermesh/libsupermesh.cpython-311-x86_64-linux-gnu.so \
+#     -lsupermesh \
+#     -Wl,-rpath,$ORIGIN/../../libsupermesh
+libsupermesh_ = ExternalDependency(
+    include_dirs=[libsupermesh.get_include()],
+    library_dirs=[str(Path(libsupermesh.get_library()).parent)],
+    runtime_library_dirs=[os.path.join(site.getsitepackages()[0], "libsupermesh", "lib")],
     libraries=["supermesh"],
-    runtime_library_dirs=[f"{sys.prefix}/lib"],
 )

 # The following extensions need to be linked accordingly:
@@ -221,7 +225,7 @@ def extensions():
         name="firedrake.cython.supermeshimpl",
         language="c",
         sources=[os.path.join("firedrake", "cython", "supermeshimpl.pyx")],
-        **(petsc_ + numpy_ + supermesh_)
+        **(petsc_ + numpy_ + libsupermesh_)
     ))
     # pyop2/sparsity.pyx: petsc, numpy,
     cython_list.append(Extension(
diff --git a/tests/firedrake/regression/test_mesh_generation.py b/tests/firedrake/regression/test_mesh_generation.py
index 637b0a1502..d2efe2a8b4 100644
--- a/tests/firedrake/regression/test_mesh_generation.py
+++ b/tests/firedrake/regression/test_mesh_generation.py
@@ -476,6 +476,28 @@ def test_boxmesh_kind(kind, num_cells):
     assert m.num_cells() == num_cells

+@pytest.mark.parallel(nprocs=2)
+def test_periodic_unit_cube_hex_cell():
+    mesh = PeriodicUnitCubeMesh(3, 3, 3, directions=[True, True, False], hexahedral=True)
+    x, y, z = SpatialCoordinate(mesh)
+    V = FunctionSpace(mesh, "CG", 3)
+    expr = (1 - x) * x + (1 - y) * y + z
+    f = Function(V).interpolate(expr)
+    error = assemble((f - expr) ** 2 * dx)
+    assert error < 1.e-30
+
+
+@pytest.mark.parallel(nprocs=4)
+def test_periodic_unit_cube_hex_facet():
+    mesh = PeriodicUnitCubeMesh(3, 3, 3, directions=[True, False, False], hexahedral=True)
+    for subdomain_id in [1, 2]:
+        area = assemble(Constant(1.) * dS(domain=mesh, subdomain_id=subdomain_id))
+        assert abs(area - 1.0) < 1.e-15
+    for subdomain_id in [3, 4, 5, 6]:
+        area = assemble(Constant(1.) * ds(domain=mesh, subdomain_id=subdomain_id))
+        assert abs(area - 1.0) < 1.e-15
+
+
 @pytest.mark.parallel(nprocs=4)
 def test_split_comm_dm_mesh():
     nspace = 2
diff --git a/tsfc/driver.py b/tsfc/driver.py
index 6e3c3baaf3..38a780a0ad 100644
--- a/tsfc/driver.py
+++ b/tsfc/driver.py
@@ -47,14 +47,13 @@
 """

-def compile_form(form, prefix="form", parameters=None, interface=None, diagonal=False, log=False):
+def compile_form(form, prefix="form", parameters=None, interface=None, diagonal=False):
     """Compiles a UFL form into a set of assembly kernels.

     :arg form: UFL form
     :arg prefix: kernel name will start with this string
     :arg parameters: parameters object
     :arg diagonal: Are we building a kernel for the diagonal of a rank-2 element tensor?
-    :arg log: bool if the Kernel should be profiled with Log events
     :returns: list of kernels
     """
     cpu_time = time.time()
@@ -71,7 +70,7 @@ def compile_form(form, prefix="form", parameters=None, interface=None, diagonal=
     kernels = []
     for integral_data in fd.integral_data:
         start = time.time()
-        kernel = compile_integral(integral_data, fd, prefix, parameters, interface=interface, diagonal=diagonal, log=log)
+        kernel = compile_integral(integral_data, fd, prefix, parameters, interface=interface, diagonal=diagonal)
         if kernel is not None:
             kernels.append(kernel)
         logger.info(GREEN % "compile_integral finished in %g seconds.", time.time() - start)
@@ -80,7 +79,7 @@
     return kernels

-def compile_integral(integral_data, form_data, prefix, parameters, interface, *, diagonal=False, log=False):
+def compile_integral(integral_data, form_data, prefix, parameters, interface, *, diagonal=False):
     """Compiles a UFL integral into an assembly kernel.

     :arg integral_data: UFL integral data
@@ -89,7 +88,6 @@ def compile_integral(integral_data, form_data, prefix, parameters, interface, *,
     :arg parameters: parameters object
     :arg interface: backend module for the kernel interface
     :arg diagonal: Are we building a kernel for the diagonal of a rank-2 element tensor?
-    :arg log: bool if the Kernel should be profiled with Log events
     :returns: a kernel constructed by the kernel interface
     """
     parameters = preprocess_parameters(parameters)
@@ -137,7 +135,7 @@
     integrand_exprs = builder.compile_integrand(integrand, params, ctx)
     integral_exprs = builder.construct_integrals(integrand_exprs, params)
     builder.stash_integrals(integral_exprs, params, ctx)
-    return builder.construct_kernel(kernel_name, ctx, log)
+    return builder.construct_kernel(kernel_name, ctx, parameters["add_petsc_events"])

 def preprocess_parameters(parameters):
@@ -157,7 +155,7 @@
 def compile_expression_dual_evaluation(expression, to_element, ufl_element, *,
                                        domain=None, interface=None,
-                                       parameters=None, log=False):
+                                       parameters=None):
     """Compile a UFL expression to be evaluated against a compile-time known reference element's dual basis.

     Useful for interpolating UFL expressions into e.g. N1curl spaces.
@@ -168,7 +166,6 @@
     :arg domain: optional UFL domain the expression is defined on (required when expression contains no domain).
     :arg interface: backend module for the kernel interface
     :arg parameters: parameters object
-    :arg log: bool if the Kernel should be profiled with Log events
     :returns: Loopy-based ExpressionKernel object.
""" if parameters is None: @@ -267,7 +264,7 @@ def compile_expression_dual_evaluation(expression, to_element, ufl_element, *, builder.register_requirements([evaluation]) builder.set_output(return_var) # Build kernel tuple - return builder.construct_kernel(impero_c, index_names, needs_external_coords, log=log) + return builder.construct_kernel(impero_c, index_names, needs_external_coords, parameters["add_petsc_events"]) class DualEvaluationCallable(object): diff --git a/tsfc/parameters.py b/tsfc/parameters.py index 1277713ad5..af44ce0cd4 100644 --- a/tsfc/parameters.py +++ b/tsfc/parameters.py @@ -20,6 +20,9 @@ # So that tests pass (needs to match scalar_type) "scalar_type_c": "double", + + # Whether to wrap the generated kernels in a PETSc event + "add_petsc_events": False, }