From 381f2a8b8264706ef5bad5a509350815754c523d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Thu, 8 Feb 2024 14:54:50 +0100
Subject: [PATCH 01/49] Update readme (#57)

---
 README.md | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/README.md b/README.md
index 977978d..8f1271d 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
This is an extension for [DOLFINx](https://github.com/FEniCS/dolfinx/) to checkpoint meshes, meshtags and functions using [ADIOS2](https://adios2.readthedocs.io/en/latest/).

-The code uses the adios2 Python-wrappers to write DOLFINx objects to file, supporting N-to-M (*recoverable*) and N-to-N (*snapshot*) checkpointing.
+The code uses the adios2 Python-wrappers to write DOLFINx objects to file, supporting N-to-M (_recoverable_) and N-to-N (_snapshot_) checkpointing.
See: [Checkpointing in DOLFINx - FEniCS 23](https://jsdokken.com/checkpointing-presentation/#/) for more information.

For scalability, the code uses [MPI Neighbourhood collectives](https://www.mpi-forum.org/docs/mpi-3.1/mpi31-report/node200.htm) for communication across processes.
@@ -13,13 +13,17 @@ For scalability, the code uses [MPI Neighbourhood collectives](https://www.mpi-f
## Installation

### Docker
+
ADIOS2 is installed in the official DOLFINx containers.
+
```bash
docker run -ti -v $(pwd):/root/shared -w /root/shared --name=dolfinx-checkpoint ghcr.io/fenics/dolfinx/dolfinx:nightly
```

### Conda
+
+To use with conda (DOLFINx release v0.7.0 works with v0.7.3 of ADIOS4DOLFINx)
+
```bash
conda create -n dolfinx-checkpoint python=3.10
conda activate dolfinx-checkpoint
@@ -27,11 +31,10 @@ conda install -c conda-forge fenics-dolfinx pip adios2
python3 -m pip install git+https://github.com/jorgensd/adios4dolfinx@v0.7.2
```

-## Functionality
-
-
+## Functionality

### DOLFINx
+
- Reading and writing meshes, using `adios4dolfinx.read/write_mesh`
- Reading and writing meshtags associated to meshes `adios4dolfinx.read/write_meshtags`
- Reading checkpoints for any element (serial and parallel, arbitrary number of functions and timesteps per file). Use `adios4dolfinx.read/write_function`.
@@ -45,15 +48,15 @@ python3 -m pip install git+https://github.com/jorgensd/adios4dolfinx@v0.7.2
> [!IMPORTANT]
> Only one mesh per file is allowed

-
### Backwards compatibility

-> [!WARNING]
-> If you are using checkpoints written with `adios4dolfinx<0.7.2` please use the `legacy=True` flag for reading in the checkpoint with
-> with any newer version
+> [!WARNING]
+> If you are using v0.7.2, you are advised to upgrade to v0.7.3, as it contains some crucial fixes for openmpi.

### Legacy DOLFIN
+
Only checkpoints for `Lagrange` or `DG` functions are supported from legacy DOLFIN
+
- Reading meshes from the DOLFIN HDF5File-format
- Reading checkpoints from the DOLFIN HDF5File-format (one checkpoint per file only)
- Reading checkpoints from the DOLFIN XDMFFile-format (one checkpoint per file only, and only uses the `.h5` file)
@@ -61,4 +64,5 @@ Only checkpoints for `Lagrange` or `DG` functions are supported from legacy DOLF
See the [API](./docs/api) for more information.

## Long term plan
+
The long term plan is to get this library merged into DOLFINx (rewritten in C++ with appropriate Python-bindings).
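Before the CI-focused patches that follow, here is a minimal sketch of the N-to-M checkpointing workflow the README above describes. It assumes the adios4dolfinx v0.7.x API (the signatures match the function definitions that appear in later patches in this series) and is illustrative, not part of any patch:

```python
# Minimal N-to-M checkpoint sketch (assumes the adios4dolfinx v0.7.x API).
from pathlib import Path

from mpi4py import MPI

import adios4dolfinx
import dolfinx

mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 8, 8)
V = dolfinx.fem.functionspace(mesh, ("Lagrange", 1))
u = dolfinx.fem.Function(V, name="u")
u.interpolate(lambda x: x[0] + x[1])

filename = Path("checkpoint.bp")
# The mesh must be written first; functions are appended to the same file.
adios4dolfinx.write_mesh(mesh, filename, engine="BP4")
adios4dolfinx.write_function(u, filename, engine="BP4")

# Read back, possibly on a different number of MPI processes (N-to-M).
in_mesh = adios4dolfinx.read_mesh(
    MPI.COMM_WORLD, filename, "BP4", dolfinx.mesh.GhostMode.shared_facet
)
V_in = dolfinx.fem.functionspace(in_mesh, ("Lagrange", 1))
u_in = dolfinx.fem.Function(V_in, name="u")
adios4dolfinx.read_function(u_in, filename, engine="BP4")
```

Note that `read_function` locates the data via the function's name (`name = u.name` in the diffs below), so the name given at write time must match at read time.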
From ab99eff2ca136abb6e4591e10b7ebf19649788ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Fri, 9 Feb 2024 13:27:53 +0100 Subject: [PATCH 02/49] Add openmpi to CI (#61) * Add openmpi and failing adios2 version to CI. * Remove formatting-check from openmpi * Add petsc_arch * Fix outsize dtype * Add debug print * Remove error from print * Set environment variable * Revert debug print * Set ompi flags to openmpi pipeline * Deactivate v2.10.0-rc1 waiting for #60 --- .github/workflows/deploy_pages.yml | 5 +- .github/workflows/test_package_openmpi.yml | 80 ++++++++++++++++++++++ src/adios4dolfinx/legacy_readers.py | 2 +- 3 files changed, 85 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/test_package_openmpi.yml diff --git a/.github/workflows/deploy_pages.yml b/.github/workflows/deploy_pages.yml index 9bd4183..c774df0 100644 --- a/.github/workflows/deploy_pages.yml +++ b/.github/workflows/deploy_pages.yml @@ -22,8 +22,11 @@ jobs: run-coverage: uses: ./.github/workflows/test_package.yml + test-openmpi: + uses: ./.github/workflows/test_package_openmpi.yml + deploy: - needs: [run-coverage, build-docs] + needs: [run-coverage, build-docs, test-openmpi] environment: name: github-pages diff --git a/.github/workflows/test_package_openmpi.yml b/.github/workflows/test_package_openmpi.yml new file mode 100644 index 0000000..497484b --- /dev/null +++ b/.github/workflows/test_package_openmpi.yml @@ -0,0 +1,80 @@ +name: Test package with openmpi + +on: + push: + # The CI is executed on every push on every branch + branches: + - "**" + pull_request: + # The CI is executed on every pull request to the main branch + branches: + - main + + workflow_call: + workflow_dispatch: + schedule: + # The CI is executed every day at 8am + - cron: "0 8 * * *" + +jobs: + + create-datasets: + uses: ./.github/workflows/create_legacy_data.yml + + create-legacy-datasets: + uses: ./.github/workflows/create_legacy_checkpoint.yml + + test-code: + runs-on: "ubuntu-22.04" + needs: [create-datasets, create-legacy-datasets] + container: ghcr.io/fenics/test-env:current-openmpi + env: + DEB_PYTHON_INSTALL_LAYOUT: deb_system + PETSC_ARCH: "linux-gnu-real64-32" + OMPI_ALLOW_RUN_AS_ROOT: 1 + OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 + + strategy: + matrix: + adios2: ["default"] # , "v2.10.0-rc1"] + steps: + - uses: actions/checkout@v4 + + - name: Update pip + run: python3 -m pip install --upgrade pip setuptools + + - name: Install DOLFINx + uses: jorgensd/actions/install-dolfinx@v0.2.0 + with: + adios2: ${{ matrix.adios2 }} + petsc_arch: ${{ env.PETSC_ARCH }} + + - name: Download legacy data + uses: actions/download-artifact@v4 + with: + name: legacy + path: ./legacy + + - name: Install package + run: python3 -m pip install .[test] + + - name: Run tests + run: | + coverage run --rcfile=.coveragerc -m mpi4py -m pytest -xvs ./tests/ + + - name: Run tests in parallel + run: | + mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py -m pytest -xvs ./tests/ + + - name: Combine coverage reports + run: | + coverage combine + coverage report -m + coverage html + + - name: Upload coverage report as artifact + uses: actions/upload-artifact@v4 + with: + name: code-coverage-report + path: htmlcov + if-no-files-found: error diff --git a/src/adios4dolfinx/legacy_readers.py b/src/adios4dolfinx/legacy_readers.py index 0cb19b7..c8766bf 100644 --- a/src/adios4dolfinx/legacy_readers.py +++ b/src/adios4dolfinx/legacy_readers.py @@ -162,7 +162,7 @@ def send_cells_and_receive_dofmap_index( # Compute amount of data 
to send to each process owners_transposed = output_owners.reshape(-1, 1) process_pos_indicator = (owners_transposed == np.asarray(dest_ranks)) - out_size = np.count_nonzero(process_pos_indicator, axis=0) + out_size = np.count_nonzero(process_pos_indicator, axis=0).astype(np.int32) recv_size = np.zeros(len(source_ranks), dtype=np.int32) mesh_to_data_comm = comm.Create_dist_graph_adjacent( From f5cc16d00344fe66e197a8c116c131df4ccc3a34 Mon Sep 17 00:00:00 2001 From: "Jorgen S. Dokken" Date: Fri, 9 Feb 2024 13:07:37 +0000 Subject: [PATCH 03/49] Fixing CI by using specific artifact names --- .github/workflows/build_docs.yml | 17 ++++++++++++- .../workflows/create_legacy_checkpoint.yml | 6 ++++- .github/workflows/create_legacy_data.yml | 8 ++++-- .github/workflows/deploy_pages.yml | 2 ++ .github/workflows/test_package.yml | 8 ++++-- .github/workflows/test_package_openmpi.yml | 25 ++++++++----------- 6 files changed, 46 insertions(+), 20 deletions(-) diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index 947bb56..eb14a44 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -10,11 +10,16 @@ on: # Allows you to run this workflow manually from the Actions tab workflow_dispatch: workflow_call: + inputs: + artifact_name: + type: string + required: true env: # Directory that will be published on github pages PUBLISH_DIR: ./_build/html DEB_PYTHON_INSTALL_LAYOUT: deb_system + ARTIFACT_NAME: "docs" jobs: build-docs: @@ -37,10 +42,20 @@ jobs: - name: Build docs run: jupyter book build -W . + - name: Upload documentation as artifact (from workflow call) + uses: actions/upload-artifact@v4 + if: ${ { github.event_name } == 'workflow_call' + with: + name: ${{ inputs.artifact_name }} + path: ${{ env.PUBLISH_DIR }} + if-no-files-found: error + + - name: Upload documentation as artifact uses: actions/upload-artifact@v4 + if: ${ { github.event_name } != 'workflow_call' with: - name: documentation + name: ${{ env.ARTIFACT_NAME }} path: ${{ env.PUBLISH_DIR }} if-no-files-found: error diff --git a/.github/workflows/create_legacy_checkpoint.yml b/.github/workflows/create_legacy_checkpoint.yml index 2bcc2cf..9b7dd91 100644 --- a/.github/workflows/create_legacy_checkpoint.yml +++ b/.github/workflows/create_legacy_checkpoint.yml @@ -2,6 +2,10 @@ name: Generate adios4dolfinx legacy data on: workflow_call: + inputs: + artifact_name: + type: string + required: true env: data_dir: "legacy_checkpoint" @@ -23,5 +27,5 @@ jobs: - uses: actions/upload-artifact@v4 with: - name: ${{ env.data_dir }} + name: ${{inputs.artifact_name}_${{ env.data_dir }} path: ./${{ env.data_dir }} diff --git a/.github/workflows/create_legacy_data.yml b/.github/workflows/create_legacy_data.yml index 79f923e..9887c8f 100644 --- a/.github/workflows/create_legacy_data.yml +++ b/.github/workflows/create_legacy_data.yml @@ -2,6 +2,10 @@ name: Generate data from Legacy DOLFIN on: workflow_call: + inputs: + artifact_name: + type: string + required: true env: data_dir: "legacy" @@ -17,7 +21,7 @@ jobs: - name: Create datasets run: python3 ./tests/create_legacy_data.py --output-dir=$data_dir - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v4 with: - name: ${{ env.data_dir }} + name: ${{inputs.artifact_name}_${{ env.data_dir }} path: ./${{ env.data_dir }} diff --git a/.github/workflows/deploy_pages.yml b/.github/workflows/deploy_pages.yml index c774df0..9bc4585 100644 --- a/.github/workflows/deploy_pages.yml +++ b/.github/workflows/deploy_pages.yml @@ -18,6 +18,8 @@ 
concurrency: jobs: build-docs: uses: ./.github/workflows/build_docs.yml + with: + artifact_name: "documentation" run-coverage: uses: ./.github/workflows/test_package.yml diff --git a/.github/workflows/test_package.yml b/.github/workflows/test_package.yml index f24225c..102c58c 100644 --- a/.github/workflows/test_package.yml +++ b/.github/workflows/test_package.yml @@ -20,9 +20,13 @@ jobs: create-datasets: uses: ./.github/workflows/create_legacy_data.yml + with: + artifact_name: "legacy_mpich" create-legacy-datasets: uses: ./.github/workflows/create_legacy_checkpoint.yml + with: + artifact_name: "legacy_checkpoint_mpich" check-formatting: uses: ./.github/workflows/check_formatting.yml @@ -44,13 +48,13 @@ jobs: - name: Download legacy data uses: actions/download-artifact@v4 with: - name: legacy + name: legacy_mpich path: ./legacy - name: Download legacy data uses: actions/download-artifact@v4 with: - name: legacy_checkpoint + name: legacy_checkpoint_mpich path: ./legacy_checkpoint - name: Install package diff --git a/.github/workflows/test_package_openmpi.yml b/.github/workflows/test_package_openmpi.yml index 497484b..b3253d8 100644 --- a/.github/workflows/test_package_openmpi.yml +++ b/.github/workflows/test_package_openmpi.yml @@ -20,9 +20,13 @@ jobs: create-datasets: uses: ./.github/workflows/create_legacy_data.yml + with: + artifact_name: "legacy_ompi" create-legacy-datasets: uses: ./.github/workflows/create_legacy_checkpoint.yml + with: + artifact_name: "legacy_checkpoint_ompi" test-code: runs-on: "ubuntu-22.04" @@ -53,7 +57,13 @@ jobs: uses: actions/download-artifact@v4 with: name: legacy - path: ./legacy + path: ./legacy_ompi + + - name: Download legacy data + uses: actions/download-artifact@v4 + with: + name: legacy_checkpoint + path: ./legacy_checkpoint_ompi - name: Install package run: python3 -m pip install .[test] @@ -65,16 +75,3 @@ jobs: - name: Run tests in parallel run: | mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py -m pytest -xvs ./tests/ - - - name: Combine coverage reports - run: | - coverage combine - coverage report -m - coverage html - - - name: Upload coverage report as artifact - uses: actions/upload-artifact@v4 - with: - name: code-coverage-report - path: htmlcov - if-no-files-found: error From 5e0ea1f4d2698faafd276e66f813537d9d26c81e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Fri, 9 Feb 2024 15:02:52 +0100 Subject: [PATCH 04/49] Fix CI for multiple artifact uploads with different compilers --- .github/workflows/build_docs.yml | 6 ++--- .../workflows/create_legacy_checkpoint.yml | 12 ++++----- .github/workflows/create_legacy_data.yml | 25 ++++++++++--------- .github/workflows/test_package_openmpi.yml | 8 +++--- 4 files changed, 26 insertions(+), 25 deletions(-) diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index eb14a44..1193768 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -10,7 +10,7 @@ on: # Allows you to run this workflow manually from the Actions tab workflow_dispatch: workflow_call: - inputs: + inputs: artifact_name: type: string required: true @@ -44,7 +44,7 @@ jobs: - name: Upload documentation as artifact (from workflow call) uses: actions/upload-artifact@v4 - if: ${ { github.event_name } == 'workflow_call' + if: ${{ github.event_name == 'workflow_call' }} with: name: ${{ inputs.artifact_name }} path: ${{ env.PUBLISH_DIR }} @@ -53,7 +53,7 @@ jobs: - name: Upload documentation as artifact uses: actions/upload-artifact@v4 - if: ${ { 
github.event_name } != 'workflow_call' + if: ${{ github.event_name != 'workflow_call' }} with: name: ${{ env.ARTIFACT_NAME }} path: ${{ env.PUBLISH_DIR }} diff --git a/.github/workflows/create_legacy_checkpoint.yml b/.github/workflows/create_legacy_checkpoint.yml index 9b7dd91..abcf78a 100644 --- a/.github/workflows/create_legacy_checkpoint.yml +++ b/.github/workflows/create_legacy_checkpoint.yml @@ -2,17 +2,17 @@ name: Generate adios4dolfinx legacy data on: workflow_call: - inputs: + inputs: artifact_name: type: string required: true - -env: - data_dir: "legacy_checkpoint" - adios4dolfinx_version: "0.7.1" + description: "Name of the artifact to be created" jobs: create-adios-data: + env: + data_dir: "legacy_checkpoint" + adios4dolfinx_version: "0.7.1" runs-on: "ubuntu-22.04" container: ghcr.io/fenics/dolfinx/dolfinx:v0.7.3 @@ -27,5 +27,5 @@ jobs: - uses: actions/upload-artifact@v4 with: - name: ${{inputs.artifact_name}_${{ env.data_dir }} + name: ${{ inputs.artifact_name }} path: ./${{ env.data_dir }} diff --git a/.github/workflows/create_legacy_data.yml b/.github/workflows/create_legacy_data.yml index 9887c8f..d5d5f6f 100644 --- a/.github/workflows/create_legacy_data.yml +++ b/.github/workflows/create_legacy_data.yml @@ -2,26 +2,27 @@ name: Generate data from Legacy DOLFIN on: workflow_call: - inputs: + inputs: artifact_name: type: string required: true - -env: - data_dir: "legacy" - + description: "Name of the artifact to be created" jobs: + create-dolfin-data: + env: + data_dir: "legacy" + runs-on: "ubuntu-22.04" container: ghcr.io/scientificcomputing/fenics:2023-11-15 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v4 - - name: Create datasets - run: python3 ./tests/create_legacy_data.py --output-dir=$data_dir + - name: Create datasets + run: python3 ./tests/create_legacy_data.py --output-dir=$data_dir - - uses: actions/upload-artifact@v4 - with: - name: ${{inputs.artifact_name}_${{ env.data_dir }} - path: ./${{ env.data_dir }} + - uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.artifact_name }} + path: ./${{ env.data_dir }} diff --git a/.github/workflows/test_package_openmpi.yml b/.github/workflows/test_package_openmpi.yml index b3253d8..2581abf 100644 --- a/.github/workflows/test_package_openmpi.yml +++ b/.github/workflows/test_package_openmpi.yml @@ -56,14 +56,14 @@ jobs: - name: Download legacy data uses: actions/download-artifact@v4 with: - name: legacy - path: ./legacy_ompi + name: legacy_ompi + path: ./legacy - name: Download legacy data uses: actions/download-artifact@v4 with: - name: legacy_checkpoint - path: ./legacy_checkpoint_ompi + name: legacy_checkpoint_ompi + path: ./legacy_checkpoint - name: Install package run: python3 -m pip install .[test] From 31f75b5d4fbaec6612ba591485378f7f678395cb Mon Sep 17 00:00:00 2001 From: "Jorgen S. 
Dokken" Date: Fri, 9 Feb 2024 14:13:45 +0000 Subject: [PATCH 05/49] try updating build docs --- .github/workflows/build_docs.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index 1193768..b43a57d 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -44,7 +44,7 @@ jobs: - name: Upload documentation as artifact (from workflow call) uses: actions/upload-artifact@v4 - if: ${{ github.event_name == 'workflow_call' }} + if: ${{ github.event_name }} == 'workflow_call' with: name: ${{ inputs.artifact_name }} path: ${{ env.PUBLISH_DIR }} @@ -53,7 +53,7 @@ jobs: - name: Upload documentation as artifact uses: actions/upload-artifact@v4 - if: ${{ github.event_name != 'workflow_call' }} + if: ${{ github.event_name }} != 'workflow_call' with: name: ${{ env.ARTIFACT_NAME }} path: ${{ env.PUBLISH_DIR }} From 399a048be975ab2badf464a68361a3a1f9dfb402 Mon Sep 17 00:00:00 2001 From: "Jorgen S. Dokken" Date: Fri, 9 Feb 2024 14:18:53 +0000 Subject: [PATCH 06/49] Fix if statement --- .github/workflows/build_docs.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index b43a57d..199fe6b 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -44,7 +44,7 @@ jobs: - name: Upload documentation as artifact (from workflow call) uses: actions/upload-artifact@v4 - if: ${{ github.event_name }} == 'workflow_call' + if: github.event_name == 'workflow_call' with: name: ${{ inputs.artifact_name }} path: ${{ env.PUBLISH_DIR }} @@ -53,7 +53,7 @@ jobs: - name: Upload documentation as artifact uses: actions/upload-artifact@v4 - if: ${{ github.event_name }} != 'workflow_call' + if: github.event_name != 'workflow_call' with: name: ${{ env.ARTIFACT_NAME }} path: ${{ env.PUBLISH_DIR }} From eb055b2e229db3ed112d7cc83166eb9c5b1774a7 Mon Sep 17 00:00:00 2001 From: "Jorgen S. Dokken" Date: Fri, 9 Feb 2024 14:26:05 +0000 Subject: [PATCH 07/49] Rename documentation and simplify logic --- .github/workflows/build_docs.yml | 13 ------------- .github/workflows/deploy_pages.yml | 5 ++--- 2 files changed, 2 insertions(+), 16 deletions(-) diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index 199fe6b..432e477 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -10,10 +10,6 @@ on: # Allows you to run this workflow manually from the Actions tab workflow_dispatch: workflow_call: - inputs: - artifact_name: - type: string - required: true env: # Directory that will be published on github pages @@ -42,15 +38,6 @@ jobs: - name: Build docs run: jupyter book build -W . 
- - name: Upload documentation as artifact (from workflow call) - uses: actions/upload-artifact@v4 - if: github.event_name == 'workflow_call' - with: - name: ${{ inputs.artifact_name }} - path: ${{ env.PUBLISH_DIR }} - if-no-files-found: error - - - name: Upload documentation as artifact uses: actions/upload-artifact@v4 if: github.event_name != 'workflow_call' diff --git a/.github/workflows/deploy_pages.yml b/.github/workflows/deploy_pages.yml index 9bc4585..3828b9f 100644 --- a/.github/workflows/deploy_pages.yml +++ b/.github/workflows/deploy_pages.yml @@ -18,8 +18,7 @@ concurrency: jobs: build-docs: uses: ./.github/workflows/build_docs.yml - with: - artifact_name: "documentation" + run-coverage: uses: ./.github/workflows/test_package.yml @@ -40,7 +39,7 @@ jobs: # docs artifact is uploaded by build-docs job uses: actions/download-artifact@v4 with: - name: documentation + name: docs path: "./public" - name: Download docs artifact From f07d26485c321856fe60349c9d97389fc0f589ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Sun, 11 Feb 2024 12:44:19 +0100 Subject: [PATCH 08/49] Remove gdim from `basix.ufl.element` (#65) * Fixes related to https://github.com/FEniCS/basix/pull/772 --- src/adios4dolfinx/checkpointing.py | 1 - src/adios4dolfinx/legacy_readers.py | 1 - tests/test_checkpointing.py | 4 ---- tests/test_checkpointing_vector.py | 15 +++++---------- tests/test_numpy_vectorization.py | 9 +++------ 5 files changed, 8 insertions(+), 22 deletions(-) diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py index f190f75..4a780b5 100644 --- a/src/adios4dolfinx/checkpointing.py +++ b/src/adios4dolfinx/checkpointing.py @@ -497,7 +497,6 @@ def read_mesh( degree, basix.LagrangeVariant(int(lvar)), shape=(mesh_geometry.shape[1],), - gdim=mesh_geometry.shape[1], dtype=mesh_geometry.dtype, ) domain = ufl.Mesh(element) diff --git a/src/adios4dolfinx/legacy_readers.py b/src/adios4dolfinx/legacy_readers.py index c8766bf..1a0f148 100644 --- a/src/adios4dolfinx/legacy_readers.py +++ b/src/adios4dolfinx/legacy_readers.py @@ -316,7 +316,6 @@ def read_mesh_from_legacy_h5( 1, basix.LagrangeVariant.equispaced, shape=(mesh_geometry.shape[1],), - gdim=mesh_geometry.shape[1], ) domain = ufl.Mesh(element) return dolfinx.mesh.create_mesh( diff --git a/tests/test_checkpointing.py b/tests/test_checkpointing.py index 3e916e9..c92b21d 100644 --- a/tests/test_checkpointing.py +++ b/tests/test_checkpointing.py @@ -46,7 +46,6 @@ def test_read_write_P_2D(read_comm, family, degree, complex, mesh_2D): mesh.ufl_cell().cellname(), degree, basix.LagrangeVariant.gll_warped, - gdim=mesh.geometry.dim, shape=(mesh.geometry.dim, ), dtype=mesh.geometry.x.dtype) @@ -72,7 +71,6 @@ def test_read_write_P_3D(read_comm, family, degree, complex, mesh_3D): mesh.ufl_cell().cellname(), degree, basix.LagrangeVariant.gll_warped, - gdim=mesh.geometry.dim, shape=(mesh.geometry.dim, )) def f(x): @@ -100,7 +98,6 @@ def test_read_write_P_2D_time(read_comm, family, degree, complex, mesh_2D): mesh.ufl_cell().cellname(), degree, basix.LagrangeVariant.gll_warped, - gdim=mesh.geometry.dim, shape=(mesh.geometry.dim, ), dtype=mesh.geometry.x.dtype) @@ -134,7 +131,6 @@ def test_read_write_P_3D_time(read_comm, family, degree, complex, mesh_3D): mesh.ufl_cell().cellname(), degree, basix.LagrangeVariant.gll_warped, - gdim=mesh.geometry.dim, shape=(mesh.geometry.dim, )) def f(x): diff --git a/tests/test_checkpointing_vector.py b/tests/test_checkpointing_vector.py index 408bdf9..42b30db 100644 --- 
a/tests/test_checkpointing_vector.py +++ b/tests/test_checkpointing_vector.py @@ -58,8 +58,7 @@ def test_read_write_2D(read_comm, family, degree, complex, simplex_mesh_2D): f_dtype = get_dtype(mesh.geometry.x.dtype, complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), - degree, - gdim=mesh.geometry.dim) + degree) def f(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) @@ -81,8 +80,7 @@ def test_read_write_3D(read_comm, family, degree, complex, simplex_mesh_3D): f_dtype = get_dtype(mesh.geometry.x.dtype, complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), - degree, - gdim=mesh.geometry.dim) + degree) def f(x): values = np.empty((3, x.shape[1]), dtype=f_dtype) @@ -104,8 +102,7 @@ def test_read_write_2D_quad(read_comm, family, degree, complex, non_simplex_mesh f_dtype = get_dtype(mesh.geometry.x.dtype, complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), - degree, - gdim=mesh.geometry.dim) + degree) def f(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) @@ -127,8 +124,7 @@ def test_read_write_hex(read_comm, family, degree, complex, non_simplex_mesh_3D) f_dtype = get_dtype(mesh.geometry.x.dtype, complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), - degree, - gdim=mesh.geometry.dim) + degree) def f(x): values = np.empty((3, x.shape[1]), dtype=f_dtype) @@ -151,8 +147,7 @@ def test_read_write_multiple(read_comm, family, degree, complex, non_simplex_mes f_dtype = get_dtype(mesh.geometry.x.dtype, complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), - degree, - gdim=mesh.geometry.dim) + degree) def f(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) diff --git a/tests/test_numpy_vectorization.py b/tests/test_numpy_vectorization.py index d32eec2..88dcfd2 100644 --- a/tests/test_numpy_vectorization.py +++ b/tests/test_numpy_vectorization.py @@ -82,8 +82,7 @@ def test_unroll_P(family, degree, mesh_2D): def test_unroll_RTCF(family, degree, mesh_3D): el = basix.ufl.element(family, mesh_3D.ufl_cell().cellname(), - degree, - gdim=mesh_3D.geometry.dim) + degree) V = dolfinx.fem.functionspace(mesh_3D, el) dofmap = V.dofmap @@ -105,8 +104,7 @@ def test_unroll_RTCF(family, degree, mesh_3D): def test_compute_dofmap_pos_RTCF(family, degree, mesh_3D): el = basix.ufl.element(family, mesh_3D.ufl_cell().cellname(), - degree, - gdim=mesh_3D.geometry.dim) + degree) V = dolfinx.fem.functionspace(mesh_3D, el) local_cells, local_pos = compute_dofmap_pos(V) @@ -123,8 +121,7 @@ def test_compute_dofmap_pos_RTCF(family, degree, mesh_3D): def test_compute_dofmap_pos_P(family, degree, mesh_2D): el = basix.ufl.element(family, mesh_2D.ufl_cell().cellname(), - degree, - gdim=mesh_2D.geometry.dim) + degree) V = dolfinx.fem.functionspace(mesh_2D, el) local_cells, local_pos = compute_dofmap_pos(V) From 7e08ceb26276644e0acc4769bb24fb6a14e16c61 Mon Sep 17 00:00:00 2001 From: Nate <34454754+nate-sime@users.noreply.github.com> Date: Mon, 12 Feb 2024 08:50:40 -0700 Subject: [PATCH 09/49] Backward compatible support for ADIOS2 2.10.x (#60) * add v2.10.0 to workflow * mypy conforming approach Co-authored-by: nate-sime <> --- .github/workflows/test_package_openmpi.yml | 2 +- src/adios4dolfinx/adios2_helpers.py | 10 +++++++++- src/adios4dolfinx/checkpointing.py | 6 ++++-- src/adios4dolfinx/legacy_readers.py | 7 +++++-- tests/test_snapshot_checkpoint.py | 6 +++++- 5 files changed, 24 insertions(+), 7 deletions(-) diff --git a/.github/workflows/test_package_openmpi.yml b/.github/workflows/test_package_openmpi.yml index 2581abf..a65c412 100644 --- 
a/.github/workflows/test_package_openmpi.yml
+++ b/.github/workflows/test_package_openmpi.yml
@@ -40,7 +40,7 @@ jobs:

     strategy:
       matrix:
-        adios2: ["default"] # , "v2.10.0-rc1"]
+        adios2: ["default", "v2.10.0-rc1"]
     steps:
       - uses: actions/checkout@v4

diff --git a/src/adios4dolfinx/adios2_helpers.py b/src/adios4dolfinx/adios2_helpers.py
index 8df8ec4..4f9dd95 100644
--- a/src/adios4dolfinx/adios2_helpers.py
+++ b/src/adios4dolfinx/adios2_helpers.py
@@ -3,7 +3,6 @@

 from mpi4py import MPI

-import adios2
 import dolfinx.cpp.graph
 import dolfinx.graph
 import numpy as np
@@ -11,6 +10,15 @@

 from .utils import compute_local_range, valid_function_types

+import adios2
+
+
+def resolve_adios_scope(adios2):
+    return adios2.bindings if hasattr(adios2, "bindings") else adios2
+
+
+adios2 = resolve_adios_scope(adios2)
+
 """
 Helpers reading/writing data with ADIOS2
 """
diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py
index 4a780b5..255e644 100644
--- a/src/adios4dolfinx/checkpointing.py
+++ b/src/adios4dolfinx/checkpointing.py
@@ -9,19 +9,21 @@

 from mpi4py import MPI

-import adios2
 import basix
 import dolfinx
 import numpy as np
 import ufl

 from .adios2_helpers import (adios_to_numpy_dtype, read_array, read_cell_perms,
-                             read_dofmap)
+                             read_dofmap, resolve_adios_scope)
 from .comm_helpers import (send_and_recv_cell_perm,
                            send_dofmap_and_recv_values,
                            send_dofs_and_recv_values)
 from .utils import compute_dofmap_pos, compute_local_range, index_owner, unroll_dofmap

+import adios2
+adios2 = resolve_adios_scope(adios2)
+
 __all__ = [
     "read_mesh",
     "write_function",
diff --git a/src/adios4dolfinx/legacy_readers.py b/src/adios4dolfinx/legacy_readers.py
index 1a0f148..43aea37 100644
--- a/src/adios4dolfinx/legacy_readers.py
+++ b/src/adios4dolfinx/legacy_readers.py
@@ -9,18 +9,21 @@

 from mpi4py import MPI

-import adios2
 import basix
 import dolfinx
 import numpy as np
 import numpy.typing as npt
 import ufl

-from .adios2_helpers import adios_to_numpy_dtype, read_array
+from .adios2_helpers import (adios_to_numpy_dtype, read_array,
+                             resolve_adios_scope)
 from .comm_helpers import send_dofs_and_recv_values
 from .utils import (compute_dofmap_pos, compute_local_range,
                     index_owner)

+import adios2
+adios2 = resolve_adios_scope(adios2)
+
 __all__ = [
     "read_mesh_from_legacy_h5",
     "read_function_from_legacy_h5",
diff --git a/tests/test_snapshot_checkpoint.py b/tests/test_snapshot_checkpoint.py
index 5421f98..1546523 100644
--- a/tests/test_snapshot_checkpoint.py
+++ b/tests/test_snapshot_checkpoint.py
@@ -2,14 +2,18 @@

 from mpi4py import MPI

-import adios2
 import basix.ufl
 import dolfinx
 import numpy as np
 import pytest
+from adios4dolfinx.adios2_helpers import resolve_adios_scope

 from adios4dolfinx import snapshot_checkpoint

+import adios2
+adios2 = resolve_adios_scope(adios2)
+
+
 triangle = dolfinx.mesh.CellType.triangle
 quad = dolfinx.mesh.CellType.quadrilateral
 tetra = dolfinx.mesh.CellType.tetrahedron

From fca9e328d50f458c042c0badb4ddafa5e27b5f06 Mon Sep 17 00:00:00 2001
From: Henrik Finsberg
Date: Fri, 23 Feb 2024 22:23:48 +0100
Subject: [PATCH 10/49] Open file in write mode if it does not already exist
 (#67)

* Open file in write mode if it does not already exist

* Apparently .bp is not a file - so just check if the 'thing' exists

---
 src/adios4dolfinx/checkpointing.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py
index 255e644..f697a4c 100644
--- a/src/adios4dolfinx/checkpointing.py
+++
b/src/adios4dolfinx/checkpointing.py
@@ -536,12 +536,14 @@ def write_function(

     # If mode is append, check if we have written the function to file before
     name = u.name
+    if not Path(filename).exists():
+        mode = adios2.Mode.Write
+    first_write = True
     if mode == adios2.Mode.Append:
         # First open the file in read-mode to check if the function has been written before
         read_file = io.Open(str(filename), adios2.Mode.Read)
         io.SetEngine(engine)
-        first_write = True
         for _ in range(read_file.Steps()):
             read_file.BeginStep()
             if name in io.AvailableAttributes():

From 083769c834bab9a9c2f307f9e4c4673787a3c9c3 Mon Sep 17 00:00:00 2001
From: Henrik Finsberg
Date: Tue, 27 Feb 2024 08:48:51 +0100
Subject: [PATCH 11/49] Consistent name for argument `filename` (#68)

* Be consistent about using 'filename' rather than 'file' and fix corresponding type hints

* More type hints

---
 src/adios4dolfinx/adios2_helpers.py | 10 +++++-----
 src/adios4dolfinx/checkpointing.py  | 28 ++++++++++++++--------------
 2 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/src/adios4dolfinx/adios2_helpers.py b/src/adios4dolfinx/adios2_helpers.py
index 4f9dd95..62072c9 100644
--- a/src/adios4dolfinx/adios2_helpers.py
+++ b/src/adios4dolfinx/adios2_helpers.py
@@ -1,6 +1,6 @@
-import pathlib
+from pathlib import Path
 from typing import Tuple
-
+from typing import Union
 from mpi4py import MPI

 import dolfinx.cpp.graph
 import dolfinx.graph
 import numpy as np
@@ -33,7 +33,7 @@
 def read_cell_perms(
     adios: adios2.ADIOS,
     comm: MPI.Intracomm,
-    filename: pathlib.Path,
+    filename: Union[Path, str],
     variable: str,
     num_cells_global: np.int64,
     engine: str,
@@ -96,7 +96,7 @@ def read_cell_perms(
 def read_dofmap(
     adios: adios2.ADIOS,
     comm: MPI.Intracomm,
-    filename: pathlib.Path,
+    filename: Union[Path, str],
     dofmap: str,
     dofmap_offsets: str,
     num_cells_global: np.int64,
@@ -175,7 +175,7 @@ def read_dofmap(

 def read_array(
     adios: adios2.ADIOS,
-    filename: pathlib.Path, array_name: str, engine: str, comm: MPI.Intracomm,
+    filename: Union[Path, str], array_name: str, engine: str, comm: MPI.Intracomm,
     time: float = 0., time_name: str = "",
     legacy: bool = False) -> Tuple[npt.NDArray[valid_function_types], int]:
     """
     Read an array from file, return the global starting position of the local array

diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py
index f697a4c..0d4ea1b 100644
--- a/src/adios4dolfinx/checkpointing.py
+++ b/src/adios4dolfinx/checkpointing.py
@@ -35,13 +35,13 @@
 ]

-def snapshot_checkpoint(uh: dolfinx.fem.Function, file: Path, mode: adios2.Mode):
+def snapshot_checkpoint(uh: dolfinx.fem.Function, filename: Union[Path, str], mode: adios2.Mode):
     """Read or write a snapshot checkpoint

     This checkpoint is only meant to be used on the same mesh during the same simulation.
:param uh: The function to write data from or read to - :param file: The file to write to or read from + :param filename: The file to write to or read from :param mode: Either read or write """ # Create ADIOS IO @@ -51,7 +51,7 @@ def snapshot_checkpoint(uh: dolfinx.fem.Function, file: Path, mode: adios2.Mode) io.SetEngine("BP4") if mode not in [adios2.Mode.Write, adios2.Mode.Read]: raise ValueError("Got invalid mode {mode}") - adios_file = io.Open(str(file), mode) + adios_file = io.Open(str(filename), mode) if mode == adios2.Mode.Write: dofmap = uh.function_space.dofmap @@ -219,7 +219,7 @@ def write_meshtags(filename: Union[Path, str], mesh: dolfinx.mesh.Mesh, meshtags outfile.Close() -def read_meshtags(filename: str, mesh: dolfinx.mesh.Mesh, meshtag_name: str, +def read_meshtags(filename: Union[Path, str], mesh: dolfinx.mesh.Mesh, meshtag_name: str, engine: str = "BP4") -> dolfinx.mesh.MeshTags: """ Read meshtags from file and return a :class:`dolfinx.mesh.MeshTags` object. @@ -307,7 +307,7 @@ def read_meshtags(filename: str, mesh: dolfinx.mesh.Mesh, meshtag_name: str, return mt -def read_function(u: dolfinx.fem.Function, filename: Path, engine: str = "BP4", time: float = 0., +def read_function(u: dolfinx.fem.Function, filename: Union[Path, str], engine: str = "BP4", time: float = 0., legacy: bool = False): """ Read checkpoint from file and fill it into `u`. @@ -427,14 +427,14 @@ def read_function(u: dolfinx.fem.Function, filename: Path, engine: str = "BP4", def read_mesh( - comm: MPI.Intracomm, file: Path, engine: str, ghost_mode: dolfinx.mesh.GhostMode + comm: MPI.Intracomm, filename: Union[Path, str], engine: str, ghost_mode: dolfinx.mesh.GhostMode ) -> dolfinx.mesh.Mesh: """ Read an ADIOS2 mesh into DOLFINx. Args: comm: The MPI communciator to distribute the mesh over - file: Path to input file + filename: Path to input file engine: ADIOS engine to use for reading (BP4, BP5 or HDF5) ghost_mode: Ghost mode to use for mesh Returns: @@ -443,26 +443,26 @@ def read_mesh( adios = adios2.ADIOS(comm) io = adios.DeclareIO("MeshReader") io.SetEngine(engine) - infile = io.Open(str(file), adios2.Mode.Read) + infile = io.Open(str(filename), adios2.Mode.Read) infile.BeginStep() # Get mesh cell type if "CellType" not in io.AvailableAttributes().keys(): - raise KeyError(f"Mesh cell type not found at CellType in {file}") + raise KeyError(f"Mesh cell type not found at CellType in {filename}") celltype = io.InquireAttribute("CellType") cell_type = celltype.DataString()[0] # Get basix info if "LagrangeVariant" not in io.AvailableAttributes().keys(): - raise KeyError(f"Mesh LagrangeVariant not found in {file}") + raise KeyError(f"Mesh LagrangeVariant not found in {filename}") lvar = io.InquireAttribute("LagrangeVariant").Data()[0] if "Degree" not in io.AvailableAttributes().keys(): - raise KeyError(f"Mesh degree not found in {file}") + raise KeyError(f"Mesh degree not found in {filename}") degree = io.InquireAttribute("Degree").Data()[0] # Get mesh geometry if "Points" not in io.AvailableVariables().keys(): - raise KeyError(f"Mesh coordinates not found at Points in {file}") + raise KeyError(f"Mesh coordinates not found at Points in {filename}") geometry = io.InquireVariable("Points") x_shape = geometry.Shape() geometry_range = compute_local_range(comm, x_shape[0]) @@ -476,7 +476,7 @@ def read_mesh( infile.Get(geometry, mesh_geometry, adios2.Mode.Deferred) # Get mesh topology (distributed) if "Topology" not in io.AvailableVariables().keys(): - raise KeyError("Mesh topology not found at Topology in {file}") 
+ raise KeyError(f"Mesh topology not found at Topology in {filename}") topology = io.InquireVariable("Topology") shape = topology.Shape() local_range = compute_local_range(comm, shape[0]) @@ -510,7 +510,7 @@ def read_mesh( def write_function( u: dolfinx.fem.Function, - filename: Path, + filename: Union[Path, str], engine: str = "BP4", mode: adios2.Mode = adios2.Mode.Append, time: float = 0.0 From 11eb64069f9e034cac0ad1280e55aab709b3335c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Fri, 1 Mar 2024 14:17:31 +0100 Subject: [PATCH 12/49] Checkpointing to original mesh (#70) * Start work on checkpointing onto original mesh * More work on checkpoint * Logic fixes in input sorting * Fix bug in compute_local_range and fixes to native checkpoint * Add more code * Works for non-dofpermutation problems. * Use cell permutation of dofmap * Add more clean code. Should add more support structures for writing meshes * Rewrite original function writer. Test coming up tomorow * Various minor fixes to get debug code to run * Add tests for: "write_parallel" -> "read_serial" and "write_serial"->"read_parallel" * Remove debug file * black format * Mypy fixes * Add test dependency * Reduce number of processes * Various fixes of integer type and missing ranks. * Update readme * Add assert to ensure parallel execution in test * Move assert to correct function * Unify writers for mesh and function for different checkpoints. Improves maintainablility and consistency. Move CellPermutation data into given function step (as a mesh does not need this info). * Ruff formatting * Use ruff formatting * Add some converting of input strings to paths * Ruff fixes on import * Remove duplicated code (computing insert position when packing data to send). * Fix input argument order * Fix ompi communication of sizes * Fix all numpy warnings * Add code of conduct, contributor guidelines and remove some more warnings. 
* Fix email --- .github/workflows/check_formatting.yml | 6 +- CODE_OF_CONDUCT.md | 65 +++ README.md | 46 +- pyproject.toml | 45 +- src/adios4dolfinx/__init__.py | 18 +- src/adios4dolfinx/adios2_helpers.py | 53 ++- src/adios4dolfinx/checkpointing.py | 397 +++++++---------- src/adios4dolfinx/comm_helpers.py | 159 ++----- src/adios4dolfinx/legacy_readers.py | 114 +++-- src/adios4dolfinx/original_checkpoint.py | 354 +++++++++++++++ src/adios4dolfinx/snapshot.py | 57 +++ src/adios4dolfinx/structures.py | 47 ++ src/adios4dolfinx/utils.py | 92 +++- src/adios4dolfinx/writers.py | 161 +++++++ tests/create_legacy_checkpoint.py | 20 +- tests/create_legacy_data.py | 48 +- tests/test_checkpointing.py | 142 +++--- tests/test_checkpointing_vector.py | 91 ++-- tests/test_legacy_readers.py | 30 +- tests/test_mesh_writer.py | 12 +- tests/test_meshtags.py | 21 +- tests/test_numpy_vectorization.py | 50 ++- tests/test_original_checkpoint.py | 540 +++++++++++++++++++++++ tests/test_snapshot_checkpoint.py | 16 +- tests/test_utils.py | 16 +- 25 files changed, 1918 insertions(+), 682 deletions(-) create mode 100644 CODE_OF_CONDUCT.md create mode 100644 src/adios4dolfinx/original_checkpoint.py create mode 100644 src/adios4dolfinx/snapshot.py create mode 100644 src/adios4dolfinx/structures.py create mode 100644 src/adios4dolfinx/writers.py create mode 100644 tests/test_original_checkpoint.py diff --git a/.github/workflows/check_formatting.yml b/.github/workflows/check_formatting.yml index f41f293..102ebe9 100644 --- a/.github/workflows/check_formatting.yml +++ b/.github/workflows/check_formatting.yml @@ -21,8 +21,10 @@ jobs: - name: Install code run: python3 -m pip install .[dev] - - name: Flake8 code - run: python3 -m flake8 -v + - name: Check code formatting with ruff + run: | + ruff check . + ruff format --check . - name: Mypy check run: python3 -m mypy -v diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..bb881f8 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,65 @@ + +# Code of Conduct +### Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +### Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. 
+ +### Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +### Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at dokken@simula.no. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +### Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/README.md b/README.md index 8f1271d..98524e2 100644 --- a/README.md +++ b/README.md @@ -38,9 +38,10 @@ python3 -m pip install git+https://github.com/jorgensd/adios4dolfinx@v0.7.2 - Reading and writing meshes, using `adios4dolfinx.read/write_mesh` - Reading and writing meshtags associated to meshes `adios4dolfinx.read/write_meshtags` - Reading checkpoints for any element (serial and parallel, arbitrary number of functions and timesteps per file). Use `adios4dolfinx.read/write_function`. +- Writing standalone function checkpoints relating to "original meshes", i.e. meshes read from `XDMFFile`. Use `adios4dolfinx.write_function_on_input_mesh` for this. > [!IMPORTANT] -> For a checkpoint to be valid, you first have to store the mesh with `write_mesh`, then use `write_function` to append to the checkpoint file. +> For checkpoints written with `write_function` to be valid, you first have to store the mesh with `write_mesh` to the checkpoint file. > [!IMPORTANT] > A checkpoint file supports multiple functions and multiple time steps, as long as the functions are associated with the same mesh @@ -66,3 +67,46 @@ See the [API](./docs/api) for more information. ## Long term plan The long term plan is to get this library merged into DOLFINx (rewritten in C++ with appropriate Python-bindings). + +# Contributor guidelines +When contributing to this repository, please first [create an issue](https://github.com/jorgensd/adios4dolfinx/issues/new/choose) containing information about the missing feature or the bug that you would like to fix. Here you can discuss the change you want to make with the maintainers of the repository. + +Please note we have a code of conduct, please follow it in all your interactions with the project. + +## New contributor guide + +To get an overview of the project, read the [documentation](https://jorgensd.github.io/adios4dolfinx). 
Here are some resources to help you get started with open source contributions:
+
+- [Finding ways to contribute to open source on GitHub](https://docs.github.com/en/get-started/exploring-projects-on-github/finding-ways-to-contribute-to-open-source-on-github)
+- [Set up Git](https://docs.github.com/en/get-started/quickstart/set-up-git)
+- [GitHub flow](https://docs.github.com/en/get-started/quickstart/github-flow)
+- [Collaborating with pull requests](https://docs.github.com/en/github/collaborating-with-pull-requests)
+
+## Pull Request Process
+
+
+### Pull Request
+
+- When you're finished with the changes, create a pull request, also known as a PR. It is also OK to create a [draft pull request](https://github.blog/2019-02-14-introducing-draft-pull-requests/) from the very beginning. Once you are done you can click on the "Ready for review" button. You can also [request a review](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/requesting-a-pull-request-review) from one of the maintainers.
+- Don't forget to [link the PR to the issue that you opened](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue).
+- Enable the checkbox to [allow maintainer edits](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork) so the branch can be updated for a merge.
+Once you submit your PR, a team member will review your proposal. We may ask questions or request additional information.
+- We may ask for changes to be made before a PR can be merged, either using [suggested changes](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/incorporating-feedback-in-your-pull-request) or pull request comments. You can apply suggested changes directly through the UI. You can make any other changes in your fork, then commit them to your branch.
+- As you update your PR and apply changes, mark each conversation as [resolved](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/commenting-on-a-pull-request#resolving-conversations).
+- If you run into any merge issues, check out this [git tutorial](https://lab.github.com/githubtraining/managing-merge-conflicts) to help you resolve merge conflicts and other issues.
+- Please make sure that all tests are passing, GitHub Pages renders nicely, and code coverage is not lower than before your contribution. You can see the different GitHub Actions workflows by clicking the "Actions" tab in the GitHub repository.
+
+Note that for a pull request to be accepted, it has to pass all the tests on CI, which includes:
+- `mypy`: typechecking
+- `ruff`: code formatting
+- `pytest`: successful execution of all tests in the `tests` folder.
+
+
+### Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
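The headline feature of this patch is the new `original_checkpoint.py` module listed in the diffstat above, which exposes `write_function_on_input_mesh` (re-exported from `__init__.py` in the diff below). A rough usage sketch follows; the argument order here mirrors `write_function`, which is an assumption since the new module's body is not shown in this excerpt, and `mesh.xdmf` is a placeholder input file:

```python
# Sketch: checkpoint a function in the ordering of the original (input) mesh,
# so it can later be read back against a mesh re-read from the same source
# without storing the mesh in the checkpoint file.
# Assumptions: the write_function_on_input_mesh argument order mirrors
# write_function; "mesh.xdmf" is a hypothetical input file.
from mpi4py import MPI

import adios4dolfinx
import dolfinx

with dolfinx.io.XDMFFile(MPI.COMM_WORLD, "mesh.xdmf", "r") as xdmf:
    mesh = xdmf.read_mesh(name="mesh")

V = dolfinx.fem.functionspace(mesh, ("Lagrange", 1))
u = dolfinx.fem.Function(V, name="u")
u.interpolate(lambda x: x[0])

adios4dolfinx.write_function_on_input_mesh(u, "u_checkpoint.bp")
```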
diff --git a/pyproject.toml b/pyproject.toml index 51d61a8..afe2b7b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,8 +11,8 @@ readme = "README.md" dependencies = ["fenics-dolfinx>=0.8.0.dev0"] [project.optional-dependencies] -test = ["pytest", "coverage"] -dev = ["pdbpp", "ipython", "mypy", "flake8"] +test = ["pytest", "coverage", "ipyparallel"] +dev = ["pdbpp", "ipython", "mypy", "ruff"] docs = ["jupyter-book"] all = ["adios4dolfinx[test]", "adios4dolfinx[dev]", "adios4dolfinx[docs]"] @@ -27,11 +27,26 @@ exclude = ["docs/", "build/"] # Folder to check with mypy files = ["src", "tests"] +[tool.ruff] +src = ["src", "tests"] +line-length = 100 +indent-width = 4 -[tool.isort] -src_paths = ["src", "tests"] -known_first_party = "adios4dolfinx" -known_third_party = [ +[tool.ruff.lint] +select = [ + # Pyflakes + "F", + # Pycodestyle + "E", + "W", + # isort + "I001" +] + + +[tool.ruff.lint.isort] +known-first-party = ["adios4dolfinx"] +known-third-party = [ "basix", "dolfinx", "ffcx", @@ -40,12 +55,14 @@ known_third_party = [ "numpy", "pytest", ] -known_mpi = ["mpi4py", "petsc4py"] -sections = [ - "FUTURE", - "STDLIB", - "MPI", - "THIRDPARTY", - "FIRSTPARTY", - "LOCALFOLDER", +section-order = [ + "future", + "standard-library", + "mpi", + "third-party", + "first-party", + "local-folder", ] + +[tool.ruff.lint.isort.sections] +"mpi" = ["mpi4py", "petsc4py"] \ No newline at end of file diff --git a/src/adios4dolfinx/__init__.py b/src/adios4dolfinx/__init__.py index 94b1075..1c97857 100644 --- a/src/adios4dolfinx/__init__.py +++ b/src/adios4dolfinx/__init__.py @@ -7,11 +7,17 @@ """Top-level package for ADIOS2Wrappers.""" from importlib.metadata import metadata -from .checkpointing import (read_function, read_mesh, read_meshtags, - snapshot_checkpoint, write_function, write_mesh, - write_meshtags) -from .legacy_readers import (read_function_from_legacy_h5, - read_mesh_from_legacy_h5) +from .checkpointing import ( + read_function, + read_mesh, + read_meshtags, + write_function, + write_mesh, + write_meshtags, +) +from .legacy_readers import read_function_from_legacy_h5, read_mesh_from_legacy_h5 +from .original_checkpoint import write_function_on_input_mesh, write_mesh_input_order +from .snapshot import snapshot_checkpoint meta = metadata("adios4dolfinx") __version__ = meta["Version"] @@ -30,4 +36,6 @@ "write_function", "read_function", "snapshot_checkpoint", + "write_function_on_input_mesh", + "write_mesh_input_order", ] diff --git a/src/adios4dolfinx/adios2_helpers.py b/src/adios4dolfinx/adios2_helpers.py index 62072c9..ead0ed1 100644 --- a/src/adios4dolfinx/adios2_helpers.py +++ b/src/adios4dolfinx/adios2_helpers.py @@ -1,8 +1,9 @@ from pathlib import Path -from typing import Tuple -from typing import Union +from typing import Tuple, Union + from mpi4py import MPI +import adios2 import dolfinx.cpp.graph import dolfinx.graph import numpy as np @@ -10,8 +11,6 @@ from .utils import compute_local_range, valid_function_types -import adios2 - def resolve_adios_scope(adios2): return adios2.bindings if hasattr(adios2, "bindings") else adios2 @@ -25,9 +24,13 @@ def resolve_adios_scope(adios2): __all__ = ["read_array", "read_dofmap", "read_cell_perms", "adios_to_numpy_dtype"] -adios_to_numpy_dtype = {"float": np.float32, "double": np.float64, - "float complex": np.complex64, "double complex": np.complex128, - "uint32_t": np.uint32} +adios_to_numpy_dtype = { + "float": np.float32, + "double": np.float64, + "float complex": np.complex64, + "double complex": np.complex128, + "uint32_t": np.uint32, +} def 
read_cell_perms( @@ -79,11 +82,10 @@ def read_cell_perms( # Get local selection local_cell_range = compute_local_range(comm, num_cells_global) - perm_var.SetSelection( - [[local_cell_range[0]], [local_cell_range[1] - local_cell_range[0]]] - ) + perm_var.SetSelection([[local_cell_range[0]], [local_cell_range[1] - local_cell_range[0]]]) in_perm = np.empty( - local_cell_range[1] - local_cell_range[0], dtype=adios_to_numpy_dtype[perm_var.Type()] + local_cell_range[1] - local_cell_range[0], + dtype=adios_to_numpy_dtype[perm_var.Type()], ) infile.Get(perm_var, in_perm, adios2.Mode.Sync) infile.EndStep() @@ -129,7 +131,8 @@ def read_dofmap( io.SetEngine(engine) infile = io.Open(str(filename), adios2.Mode.Read) - # First find step with dofmap offsets, to be able to read in a full row of the dofmap + # First find step with dofmap offsets, to be able to read + # in a full row of the dofmap for i in range(infile.Steps()): infile.BeginStep() if dofmap_offsets in io.AvailableVariables().keys(): @@ -143,9 +146,7 @@ def read_dofmap( shape = d_offsets.Shape() assert len(shape) == 1 # As the offsets are one longer than the number of cells, we need to read in with an overlap - d_offsets.SetSelection( - [[local_cell_range[0]], [local_cell_range[1] + 1 - local_cell_range[0]]] - ) + d_offsets.SetSelection([[local_cell_range[0]], [local_cell_range[1] + 1 - local_cell_range[0]]]) in_offsets = np.empty( local_cell_range[1] + 1 - local_cell_range[0], dtype=d_offsets.Type().strip("_t"), @@ -158,9 +159,7 @@ def read_dofmap( raise KeyError(f"Dof offsets not found at {dofmap} in {filename}") cell_dofs = io.InquireVariable(dofmap) cell_dofs.SetSelection([[in_offsets[0]], [in_offsets[-1] - in_offsets[0]]]) - in_dofmap = np.empty( - in_offsets[-1] - in_offsets[0], dtype=cell_dofs.Type().strip("_t") - ) + in_dofmap = np.empty(in_offsets[-1] - in_offsets[0], dtype=cell_dofs.Type().strip("_t")) infile.Get(cell_dofs, in_dofmap, adios2.Mode.Sync) in_dofmap = in_dofmap.astype(np.int64) @@ -174,10 +173,15 @@ def read_dofmap( def read_array( - adios: adios2.ADIOS, - filename: Union[Path, str], array_name: str, engine: str, comm: MPI.Intracomm, - time: float = 0., time_name: str = "", - legacy: bool = False) -> Tuple[npt.NDArray[valid_function_types], int]: + adios: adios2.ADIOS, + filename: Union[Path, str], + array_name: str, + engine: str, + comm: MPI.Intracomm, + time: float = 0.0, + time_name: str = "", + legacy: bool = False, +) -> Tuple[npt.NDArray[valid_function_types], int]: """ Read an array from file, return the global starting position of the local array @@ -237,7 +241,10 @@ def read_array( vals = np.empty(arr_range[1] - arr_range[0], dtype=adios_to_numpy_dtype[arr.Type()]) else: arr.SetSelection([[arr_range[0], 0], [arr_range[1] - arr_range[0], arr_shape[1]]]) - vals = np.empty((arr_range[1] - arr_range[0], arr_shape[1]), dtype=adios_to_numpy_dtype[arr.Type()]) + vals = np.empty( + (arr_range[1] - arr_range[0], arr_shape[1]), + dtype=adios_to_numpy_dtype[arr.Type()], + ) assert arr_shape[1] == 1 infile.Get(arr, vals, adios2.Mode.Sync) diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py index 0d4ea1b..723dad1 100644 --- a/src/adios4dolfinx/checkpointing.py +++ b/src/adios4dolfinx/checkpointing.py @@ -9,19 +9,29 @@ from mpi4py import MPI +import adios2 import basix import dolfinx import numpy as np import ufl -from .adios2_helpers import (adios_to_numpy_dtype, read_array, read_cell_perms, - read_dofmap, resolve_adios_scope) -from .comm_helpers import (send_and_recv_cell_perm, - 
send_dofmap_and_recv_values, - send_dofs_and_recv_values) +from .adios2_helpers import ( + adios_to_numpy_dtype, + read_array, + read_cell_perms, + read_dofmap, + resolve_adios_scope, +) +from .comm_helpers import ( + send_and_recv_cell_perm, + send_dofmap_and_recv_values, + send_dofs_and_recv_values, +) +from .structures import FunctionData, MeshData from .utils import compute_dofmap_pos, compute_local_range, index_owner, unroll_dofmap +from .writers import write_function as _internal_function_writer +from .writers import write_mesh as _internal_mesh_writer -import adios2 adios2 = resolve_adios_scope(adios2) __all__ = [ @@ -29,139 +39,23 @@ "write_function", "read_function", "write_mesh", - "snapshot_checkpoint", "read_meshtags", - "write_meshtags" + "write_meshtags", ] -def snapshot_checkpoint(uh: dolfinx.fem.Function, filename: Union[Path, str], mode: adios2.Mode): - """Read or write a snapshot checkpoint - - This checkpoint is only meant to be used on the same mesh during the same simulation. - - :param uh: The function to write data from or read to - :param filename: The file to write to or read from - :param mode: Either read or write - """ - # Create ADIOS IO - adios = adios2.ADIOS(uh.function_space.mesh.comm) - io_name = "SnapshotCheckPoint" - io = adios.DeclareIO(io_name) - io.SetEngine("BP4") - if mode not in [adios2.Mode.Write, adios2.Mode.Read]: - raise ValueError("Got invalid mode {mode}") - adios_file = io.Open(str(filename), mode) - - if mode == adios2.Mode.Write: - dofmap = uh.function_space.dofmap - num_dofs_local = dofmap.index_map.size_local * dofmap.index_map_bs - local_dofs = uh.x.array[:num_dofs_local].copy() - - # Write to file - adios_file.BeginStep() - dofs = io.DefineVariable("dofs", local_dofs, count=[num_dofs_local]) - adios_file.Put(dofs, local_dofs, adios2.Mode.Sync) - adios_file.EndStep() - else: - adios_file.BeginStep() - in_variable = io.InquireVariable("dofs") - in_variable.SetBlockSelection(uh.function_space.mesh.comm.rank) - adios_file.Get(in_variable, uh.x.array, adios2.Mode.Sync) - adios_file.EndStep() - uh.x.scatter_forward() - adios_file.Close() - adios.RemoveIO(io_name) - - -def write_mesh(mesh: dolfinx.mesh.Mesh, filename: Path, engine: str = "BP4"): - """ - Write a mesh to specified ADIOS2 format, see: - https://adios2.readthedocs.io/en/stable/engines/engines.html - for possible formats. 
- - Args: - mesh: The mesh to write to file - filename: Path to save mesh (without file-extension) - engine: Adios2 Engine - """ - num_xdofs_local = mesh.geometry.index_map().size_local - num_xdofs_global = mesh.geometry.index_map().size_global - local_range = mesh.geometry.index_map().local_range - gdim = mesh.geometry.dim - - local_points = mesh.geometry.x[:num_xdofs_local, :gdim].copy() - adios = adios2.ADIOS(mesh.comm) - io = adios.DeclareIO("MeshWriter") - io.SetEngine(engine) - outfile = io.Open(str(filename), adios2.Mode.Write) - # Write geometry - pointvar = io.DefineVariable( - "Points", - local_points, - shape=[num_xdofs_global, gdim], - start=[local_range[0], 0], - count=[num_xdofs_local, gdim], - ) - outfile.Put(pointvar, local_points, adios2.Mode.Sync) - - # Write celltype - io.DefineAttribute("CellType", mesh.topology.cell_name()) - - # Write basix properties - cmap = mesh.geometry.cmap - io.DefineAttribute("Degree", np.array([cmap.degree], dtype=np.int32)) - io.DefineAttribute("LagrangeVariant", np.array([cmap.variant], dtype=np.int32)) - - # Write topology - g_imap = mesh.geometry.index_map() - g_dmap = mesh.geometry.dofmap - num_cells_local = mesh.topology.index_map(mesh.topology.dim).size_local - num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global - start_cell = mesh.topology.index_map(mesh.topology.dim).local_range[0] - geom_layout = cmap.create_dof_layout() - num_dofs_per_cell = geom_layout.num_entity_closure_dofs(mesh.topology.dim) - - dofs_out = np.zeros((num_cells_local, num_dofs_per_cell), dtype=np.int64) - assert g_dmap.shape[1] == num_dofs_per_cell - dofs_out[:, :] = np.asarray( - g_imap.local_to_global(g_dmap[:num_cells_local, :].reshape(-1)) - ).reshape(dofs_out.shape) - - dvar = io.DefineVariable( - "Topology", - dofs_out, - shape=[num_cells_global, num_dofs_per_cell], - start=[start_cell, 0], - count=[num_cells_local, num_dofs_per_cell], - ) - outfile.Put(dvar, dofs_out) - - # Add mesh permutations - mesh.topology.create_entity_permutations() - cell_perm = mesh.topology.get_cell_permutation_info() - pvar = io.DefineVariable( - "CellPermutations", - cell_perm, - shape=[num_cells_global], - start=[start_cell], - count=[num_cells_local], - ) - outfile.Put(pvar, cell_perm) - outfile.PerformPuts() - outfile.EndStep() - outfile.Close() - assert adios.RemoveIO("MeshWriter") - - -def write_meshtags(filename: Union[Path, str], mesh: dolfinx.mesh.Mesh, meshtags: dolfinx.mesh.MeshTags, - engine: Optional[str] = "BP4"): +def write_meshtags( + filename: Union[Path, str], + mesh: dolfinx.mesh.Mesh, + meshtags: dolfinx.mesh.MeshTags, + engine: Optional[str] = "BP4", +): """ Write meshtags associated with input mesh to file. .. note:: - For this checkpoint to work, the mesh must be written to file using :func:`write_mesh` - before calling this function. + For this checkpoint to work, the mesh must be written to file + using :func:`write_mesh` before calling this function. 
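+
+    Example:
+        A minimal usage sketch (``mesh`` and ``facet_tags`` are assumed to be an
+        existing :class:`dolfinx.mesh.Mesh` and a :class:`dolfinx.mesh.MeshTags`
+        object on it; the file name is illustrative):
+
+        .. code-block:: python
+
+            write_mesh(mesh, Path("checkpoint.bp"))
+            write_meshtags(Path("checkpoint.bp"), mesh, facet_tags)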
Args: filename: Path to save meshtags (with file-extension) @@ -173,7 +67,7 @@ def write_meshtags(filename: Union[Path, str], mesh: dolfinx.mesh.Mesh, meshtags dim = meshtags.dim num_tag_entities_local = mesh.topology.index_map(dim).size_local local_tag_entities = tag_entities[tag_entities < num_tag_entities_local] - local_values = meshtags.values[:len(local_tag_entities)] + local_values = meshtags.values[: len(local_tag_entities)] num_saved_tag_entities = len(local_tag_entities) local_start = mesh.comm.exscan(num_saved_tag_entities, op=MPI.SUM) @@ -183,7 +77,8 @@ def write_meshtags(filename: Union[Path, str], mesh: dolfinx.mesh.Mesh, meshtags num_dofs_per_entity = dof_layout.num_entity_closure_dofs(dim) entities_to_geometry = dolfinx.cpp.mesh.entities_to_geometry( - mesh._cpp_object, dim, tag_entities, False) + mesh._cpp_object, dim, tag_entities, False + ) indices = mesh.geometry.index_map().local_to_global(entities_to_geometry.reshape(-1)) @@ -193,7 +88,7 @@ def write_meshtags(filename: Union[Path, str], mesh: dolfinx.mesh.Mesh, meshtags outfile = io.Open(str(filename), adios2.Mode.Append) # Write meshtag topology topology_var = io.DefineVariable( - meshtags.name+"_topology", + meshtags.name + "_topology", indices, shape=[global_num_tag_entities, num_dofs_per_entity], start=[local_start, 0], @@ -203,7 +98,7 @@ def write_meshtags(filename: Union[Path, str], mesh: dolfinx.mesh.Mesh, meshtags # Write meshtag topology values_var = io.DefineVariable( - meshtags.name+"_values", + meshtags.name + "_values", local_values, shape=[global_num_tag_entities], start=[local_start], @@ -219,8 +114,12 @@ def write_meshtags(filename: Union[Path, str], mesh: dolfinx.mesh.Mesh, meshtags outfile.Close() -def read_meshtags(filename: Union[Path, str], mesh: dolfinx.mesh.Mesh, meshtag_name: str, - engine: str = "BP4") -> dolfinx.mesh.MeshTags: +def read_meshtags( + filename: Union[Path, str], + mesh: dolfinx.mesh.Mesh, + meshtag_name: str, + engine: str = "BP4", +) -> dolfinx.mesh.MeshTags: """ Read meshtags from file and return a :class:`dolfinx.mesh.MeshTags` object. @@ -281,9 +180,7 @@ def read_meshtags(filename: Union[Path, str], mesh: dolfinx.mesh.Mesh, meshtag_n values = io.InquireVariable(values_name) val_shape = values.Shape() assert val_shape[0] == top_shape[0] - values.SetSelection( - [[topology_range[0]], [topology_range[1] - topology_range[0]]] - ) + values.SetSelection([[topology_range[0]], [topology_range[1] - topology_range[0]]]) tag_values = np.empty((topology_range[1] - topology_range[0]), dtype=np.int32) infile.Get(values, tag_values, adios2.Mode.Deferred) @@ -293,7 +190,8 @@ def read_meshtags(filename: Union[Path, str], mesh: dolfinx.mesh.Mesh, meshtag_n assert adios.RemoveIO("MeshTagsReader") local_entities, local_values = dolfinx.cpp.io.distribute_entity_data( - mesh._cpp_object, int(dim), mesh_entities, tag_values) + mesh._cpp_object, int(dim), mesh_entities, tag_values + ) mesh.topology.create_connectivity(dim, 0) mesh.topology.create_connectivity(dim, mesh.topology.dim) @@ -307,8 +205,13 @@ def read_meshtags(filename: Union[Path, str], mesh: dolfinx.mesh.Mesh, meshtag_n return mt -def read_function(u: dolfinx.fem.Function, filename: Union[Path, str], engine: str = "BP4", time: float = 0., - legacy: bool = False): +def read_function( + u: dolfinx.fem.Function, + filename: Union[Path, str], + engine: str = "BP4", + time: float = 0.0, + legacy: bool = False, +): """ Read checkpoint from file and fill it into `u`. 
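+
+    Example:
+        A minimal read-back sketch (assumes ``checkpoint.bp`` was produced by
+        :func:`write_mesh` and :func:`write_function`, and that the element and
+        the function name match what was written; names follow DOLFINx v0.7):
+
+        .. code-block:: python
+
+            mesh = read_mesh(
+                MPI.COMM_WORLD, Path("checkpoint.bp"), "BP4", dolfinx.mesh.GhostMode.shared_facet
+            )
+            V = dolfinx.fem.FunctionSpace(mesh, ("Lagrange", 1))
+            u = dolfinx.fem.Function(V, name="u")
+            read_function(u, Path("checkpoint.bp"))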
@@ -336,9 +239,7 @@ def read_function(u: dolfinx.fem.Function, filename: Union[Path, str], engine: s
 
     # -------------------Step 2------------------------------------
     # Send and receive global cell index and cell perm
-    inc_cells, inc_perms = send_and_recv_cell_perm(
-        input_cells, cell_perm, owners, mesh.comm
-    )
+    inc_cells, inc_perms = send_and_recv_cell_perm(input_cells, cell_perm, owners, mesh.comm)
 
     # -------------------Step 3-----------------------------------
     # Read dofmap from file and compute dof owners
@@ -353,8 +254,7 @@ def read_function(u: dolfinx.fem.Function, filename: Union[Path, str], engine: s
     )
     # Compute owner of dofs in dofmap
     num_dofs_global = (
-        u.function_space.dofmap.index_map.size_global
-        * u.function_space.dofmap.index_map_bs
+        u.function_space.dofmap.index_map.size_global * u.function_space.dofmap.index_map_bs
     )
     dof_owner = index_owner(comm, input_dofmap.array, num_dofs_global)
 
@@ -365,8 +265,9 @@ def read_function(u: dolfinx.fem.Function, filename: Union[Path, str], engine: s
     else:
         array_path = f"{name}_values"
        time_name = f"{name}_time"
-    input_array, starting_pos = read_array(adios, filename, array_path, engine, comm, time, time_name,
-                                           legacy=legacy)
+    input_array, starting_pos = read_array(
+        adios, filename, array_path, engine, comm, time, time_name, legacy=legacy
+    )
     recv_array = send_dofs_and_recv_values(
         input_dofmap.array, dof_owner, comm, input_array, starting_pos
     )
@@ -387,14 +288,14 @@ def read_function(u: dolfinx.fem.Function, filename: Union[Path, str], engine: s
 
     # First invert input data to reference element then transform to current mesh
     for i, l_cell in enumerate(input_local_cell_index):
-        start, end = input_dofmap.offsets[l_cell:l_cell+2]
-        # FIXME: Tempoary cast uint32 to integer as transformations doesn't support uint32 with the switch
-        # to nanobind
+        start, end = input_dofmap.offsets[l_cell : l_cell + 2]
+        # FIXME: Temporary cast of uint32 to integer, as the transformations
+        # don't support uint32 after the switch to nanobind
         element.pre_apply_transpose_dof_transformation(
-            recv_array[int(start):int(end)], int(input_perms[l_cell]), bs
+            recv_array[int(start) : int(end)], int(input_perms[l_cell]), bs
         )
         element.pre_apply_inverse_transpose_dof_transformation(
-            recv_array[int(start):int(end)], int(inc_perms[i]), bs
+            recv_array[int(start) : int(end)], int(inc_perms[i]), bs
         )
     # ------------------Step 6----------------------------------------
     # For each dof owned by a process, find the local position in the dofmap.
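+    # NOTE (illustration only, not executed logic): the owner/count pairs used
+    # in the step below come from ``np.unique(..., return_counts=True)``, which
+    # returns the sorted destination ranks together with how many entries go to
+    # each of them, e.g.
+    #
+    #   np.unique([3, 0, 3, 1, 0, 0], return_counts=True)
+    #   -> (array([0, 1, 3]), array([3, 1, 2]))
+    #
+    # The counts are exactly the send sizes that the neighbourhood collectives
+    # (``Neighbor_alltoall``/``Neighbor_alltoallv``) expect.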
@@ -403,7 +304,7 @@ def read_function(u: dolfinx.fem.Function, filename: Union[Path, str], engine: s input_cells = V.mesh.topology.original_cell_index[local_cells] num_cells_global = V.mesh.topology.index_map(V.mesh.topology.dim).size_global owners = index_owner(V.mesh.comm, input_cells, num_cells_global) - unique_owners = np.unique(owners) + unique_owners, owner_count = np.unique(owners, return_counts=True) # FIXME: In C++ use NBX to find neighbourhood sub_comm = V.mesh.comm.Create_dist_graph( [V.mesh.comm.rank], [len(unique_owners)], unique_owners, reorder=False @@ -416,6 +317,7 @@ def read_function(u: dolfinx.fem.Function, filename: Union[Path, str], engine: s np.asarray(source, dtype=np.int32), np.asarray(dest, dtype=np.int32), owners, + owner_count.astype(np.int32), input_cells, dof_pos, num_cells_global, @@ -427,7 +329,10 @@ def read_function(u: dolfinx.fem.Function, filename: Union[Path, str], engine: s def read_mesh( - comm: MPI.Intracomm, filename: Union[Path, str], engine: str, ghost_mode: dolfinx.mesh.GhostMode + comm: MPI.Intracomm, + filename: Union[Path, str], + engine: str, + ghost_mode: dolfinx.mesh.GhostMode, ) -> dolfinx.mesh.Mesh: """ Read an ADIOS2 mesh into DOLFINx. @@ -480,12 +385,8 @@ def read_mesh( topology = io.InquireVariable("Topology") shape = topology.Shape() local_range = compute_local_range(comm, shape[0]) - topology.SetSelection( - [[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]] - ) - mesh_topology = np.empty( - (local_range[1] - local_range[0], shape[1]), dtype=np.int64 - ) + topology.SetSelection([[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]]) + mesh_topology = np.empty((local_range[1] - local_range[0], shape[1]), dtype=np.int64) infile.Get(topology, mesh_topology, adios2.Mode.Deferred) infile.PerformGets() @@ -503,8 +404,60 @@ def read_mesh( ) domain = ufl.Mesh(element) partitioner = dolfinx.cpp.mesh.create_cell_partitioner(ghost_mode) - return dolfinx.mesh.create_mesh( - comm, mesh_topology, mesh_geometry, domain, partitioner + return dolfinx.mesh.create_mesh(comm, mesh_topology, mesh_geometry, domain, partitioner) + + +def write_mesh(mesh: dolfinx.mesh.Mesh, filename: Path, engine: str = "BP4"): + """ + Write a mesh to specified ADIOS2 format, see: + https://adios2.readthedocs.io/en/stable/engines/engines.html + for possible formats. 
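+
+    Example:
+        A minimal sketch (``mesh`` is an existing :class:`dolfinx.mesh.Mesh`):
+
+        .. code-block:: python
+
+            write_mesh(mesh, Path("checkpoint.bp"), engine="BP4")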
+
+    Args:
+        mesh: The mesh to write to file
+        filename: Path to save mesh (without file-extension)
+        engine: Adios2 Engine
+    """
+    num_xdofs_local = mesh.geometry.index_map().size_local
+    num_xdofs_global = mesh.geometry.index_map().size_global
+    geometry_range = mesh.geometry.index_map().local_range
+    gdim = mesh.geometry.dim
+
+    # Convert local connectivity to global connectivity
+    g_imap = mesh.geometry.index_map()
+    g_dmap = mesh.geometry.dofmap
+    num_cells_local = mesh.topology.index_map(mesh.topology.dim).size_local
+    num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global
+    cell_range = mesh.topology.index_map(mesh.topology.dim).local_range
+    cmap = mesh.geometry.cmap
+    geom_layout = cmap.create_dof_layout()
+    num_dofs_per_cell = geom_layout.num_entity_closure_dofs(mesh.topology.dim)
+    dofs_out = np.zeros((num_cells_local, num_dofs_per_cell), dtype=np.int64)
+    assert g_dmap.shape[1] == num_dofs_per_cell
+    dofs_out[:, :] = np.asarray(
+        g_imap.local_to_global(g_dmap[:num_cells_local, :].reshape(-1))
+    ).reshape(dofs_out.shape)
+
+    mesh_data = MeshData(
+        local_geometry=mesh.geometry.x[:num_xdofs_local, :gdim].copy(),
+        local_geometry_pos=geometry_range,
+        num_nodes_global=num_xdofs_global,
+        local_topology=dofs_out,
+        local_topology_pos=cell_range,
+        num_cells_global=num_cells_global,
+        cell_type=mesh.topology.cell_name(),
+        degree=mesh.geometry.cmap.degree,
+        lagrange_variant=mesh.geometry.cmap.variant,
+    )
+
+    # NOTE: Mode will become an input again once we have variable geometry
+    _internal_mesh_writer(
+        mesh.comm,
+        mesh_data,
+        filename,
+        engine,
+        mode=adios2.Mode.Write,
+        io_name="MeshWriter",
+    )
 
 
@@ -513,7 +466,7 @@ def write_function(
     filename: Union[Path, str],
     engine: str = "BP4",
     mode: adios2.Mode = adios2.Mode.Append,
-    time: float = 0.0
+    time: float = 0.0,
 ):
     """
     Write function checkpoint to file.
@@ -529,68 +482,16 @@ def write_function( values = u.x.array mesh = u.function_space.mesh comm = mesh.comm - - adios = adios2.ADIOS(comm) - io = adios.DeclareIO("FunctionWriter") - io.SetEngine(engine) - - # If mode is append, check if we have written the function to file before - name = u.name - if not Path(filename).exists(): - mode = adios2.Mode.Write - - first_write = True - if mode == adios2.Mode.Append: - # First open the file in read-mode to check if the function has been written before - read_file = io.Open(str(filename), adios2.Mode.Read) - io.SetEngine(engine) - for _ in range(read_file.Steps()): - read_file.BeginStep() - if name in io.AvailableAttributes(): - first_write = False - break - read_file.EndStep() - read_file.Close() - outfile = io.Open(str(filename), mode) - io.DefineAttribute(name, name) - outfile.BeginStep() - - # Add time step to file - t_arr = np.array([time], dtype=np.float64) - time_var = io.DefineVariable( - f"{name}_time", - t_arr, - shape=[1], - start=[0], - count=[1 if mesh.comm.rank == 0 else 0], - ) - outfile.Put(time_var, t_arr) - - # Write local part of vector - num_dofs_local = dofmap.index_map.size_local * dofmap.index_map_bs - num_dofs_global = dofmap.index_map.size_global * dofmap.index_map_bs - local_start = dofmap.index_map.local_range[0] * dofmap.index_map_bs - val_var = io.DefineVariable( - f"{name}_values", - np.zeros(num_dofs_local, dtype=u.dtype), - shape=[num_dofs_global], - start=[local_start], - count=[num_dofs_local], - ) - outfile.Put(val_var, values[:num_dofs_local]) - - if not first_write: - outfile.PerformPuts() - outfile.EndStep() - outfile.Close() - assert adios.RemoveIO("FunctionWriter") - return + mesh.topology.create_entity_permutations() + cell_perm = mesh.topology.get_cell_permutation_info() + num_cells_local = mesh.topology.index_map(mesh.topology.dim).size_local + local_cell_range = mesh.topology.index_map(mesh.topology.dim).local_range + num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global # Convert local dofmap into global_dofmap dmap = dofmap.list num_dofs_per_cell = dmap.shape[1] dofmap_bs = dofmap.bs - num_cells_local = mesh.topology.index_map(mesh.topology.dim).size_local num_dofs_local_dmap = num_cells_local * num_dofs_per_cell * dofmap_bs index_map_bs = dofmap.index_map_bs @@ -599,36 +500,34 @@ def write_function( dmap_loc = (unrolled_dofmap // index_map_bs).reshape(-1) dmap_rem = (unrolled_dofmap % index_map_bs).reshape(-1) - local_dofmap_offsets = np.arange(num_cells_local + 1, dtype=np.int64) - local_dofmap_offsets[:] *= num_dofs_per_cell * dofmap_bs - # Convert imap index to global index imap_global = dofmap.index_map.local_to_global(dmap_loc) dofmap_global = imap_global * index_map_bs + dmap_rem - - # Get offsets of dofmap dofmap_imap = dolfinx.common.IndexMap(mesh.comm, num_dofs_local_dmap) - dofmap_var = io.DefineVariable( - f"{name}_dofmap", - np.zeros(num_dofs_local_dmap, dtype=np.int64), - shape=[dofmap_imap.size_global], - start=[dofmap_imap.local_range[0]], - count=[dofmap_imap.size_local], - ) - outfile.Put(dofmap_var, dofmap_global) - num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global - cell_start = mesh.topology.index_map(mesh.topology.dim).local_range[0] + # Compute dofmap offsets + local_dofmap_offsets = np.arange(num_cells_local + 1, dtype=np.int64) + local_dofmap_offsets[:] *= num_dofs_per_cell * dofmap_bs local_dofmap_offsets += dofmap_imap.local_range[0] - xdofmap_var = io.DefineVariable( - f"{name}_XDofmap", - np.zeros(num_cells_local + 1, 
dtype=np.int64), - shape=[num_cells_global + 1], - start=[cell_start], - count=[num_cells_local + 1], + + num_dofs_global = dofmap.index_map.size_global * dofmap.index_map_bs + local_dof_range = np.asarray(dofmap.index_map.local_range) * dofmap.index_map_bs + num_dofs_local = local_dof_range[1] - local_dof_range[0] + + # Create internal data structure for function data to write to file + function_data = FunctionData( + cell_permutations=cell_perm[:num_cells_local].copy(), + local_cell_range=local_cell_range, + num_cells_global=num_cells_global, + dofmap_array=dofmap_global, + dofmap_offsets=local_dofmap_offsets, + dofmap_range=dofmap_imap.local_range, + global_dofs_in_dofmap=dofmap_imap.size_global, + values=values[:num_dofs_local].copy(), + dof_range=local_dof_range, + num_dofs_global=num_dofs_global, + name=u.name, ) - outfile.Put(xdofmap_var, local_dofmap_offsets) - outfile.PerformPuts() - outfile.EndStep() - outfile.Close() - assert adios.RemoveIO("FunctionWriter") + # Write to file + fname = Path(filename) + _internal_function_writer(comm, function_data, fname, engine, mode, time, "FunctionWriter") diff --git a/src/adios4dolfinx/comm_helpers.py b/src/adios4dolfinx/comm_helpers.py index 3cbc61f..a0fd70b 100644 --- a/src/adios4dolfinx/comm_helpers.py +++ b/src/adios4dolfinx/comm_helpers.py @@ -5,19 +5,24 @@ import numpy as np import numpy.typing as npt -from .utils import compute_local_range, valid_function_types +from .utils import compute_insert_position, compute_local_range, valid_function_types __all__ = [ "send_dofmap_and_recv_values", "send_and_recv_cell_perm", "send_dofs_and_recv_values", + "numpy_to_mpi", ] """ Helpers for sending and receiving values for checkpointing """ -numpy_to_mpi = {np.float64: MPI.DOUBLE, np.float32: MPI.FLOAT, - np.complex64: MPI.COMPLEX, np.complex128: MPI.DOUBLE_COMPLEX} +numpy_to_mpi = { + np.float64: MPI.DOUBLE, + np.float32: MPI.FLOAT, + np.complex64: MPI.COMPLEX, + np.complex128: MPI.DOUBLE_COMPLEX, +} def send_dofmap_and_recv_values( @@ -25,6 +30,7 @@ def send_dofmap_and_recv_values( source_ranks: npt.NDArray[np.int32], dest_ranks: npt.NDArray[np.int32], output_owners: npt.NDArray[np.int32], + dest_size: npt.NDArray[np.int32], input_cells: npt.NDArray[np.int64], dofmap_pos: npt.NDArray[np.int32], num_cells_global: np.int64, @@ -39,62 +45,40 @@ def send_dofmap_and_recv_values( comm: The MPI communicator to create the Neighbourhood-communicator from source_ranks: Ranks that will send dofmap indices to current process dest_ranks: Ranks that will receive dofmap indices from current process - output_owners: The owners of each dofmap entry on this process. The unique set of these entries - should be the same as the dest_ranks. + output_owners: The owners of each dofmap entry on this process. The unique set of + these entries should be the same as the dest_ranks. + dest_size: The number of entries sent to each owner input_cells: A cell associated with the degree of freedom sent (global index). - dofmap_pos: The local position in the dofmap. I.e. `dof = dofmap.links(input_cells)[dofmap_pos]` + dofmap_pos: The local position in the dofmap. I.e. + `dof = dofmap.links(input_cells)[dofmap_pos]` num_cells_global: Number of global cells - values: Values currently held by this process. These are ordered (num_cells_local, num_dofs_per_cell), - flattened row-major. + values: Values currently held by this process. These are + ordered (num_cells_local, num_dofs_per_cell), flattened row-major. dofmap_offsets: Local dofmap offsets to access the correct `values`. 
Returns: Values corresponding to the dofs owned by this process. """ - - # This becomes a (num_dofs, num_dest_ranks) matrix where the (i,j) entry - # is True if dof i is owned by dest_rank_j - owners_transposed = output_owners.reshape(-1, 1) - process_pos_indicator = (owners_transposed == dest_ranks) - - # Compute number of dofs owned by each rank - out_size = np.count_nonzero(process_pos_indicator, axis=0).astype(np.int32) - - # Compute send offset based of number of dofs owned by each rank - offsets = np.zeros(len(out_size) + 1, dtype=np.intc) - offsets[1:] = np.cumsum(out_size) - - # As each dof can only be owned by one process, this returns what column - # the ith dof is owned by - proc_row, proc_col = np.nonzero(process_pos_indicator) - assert np.allclose(proc_row, np.arange(len(process_pos_indicator), dtype=np.int32)) - - # For a dof owned by process j, get its position in the list of dofs owned by this process - cum_pos = np.cumsum(process_pos_indicator, axis=0) - - # Get the insert position (relative to process) for each dof. Subtract 0 as first occurence is equal to 1) due - # to the cumsum above - insert_position = cum_pos[proc_row, proc_col] - 1 - # Compute aboslute insert position - insert_position += offsets[proc_col] + insert_position = compute_insert_position(output_owners, dest_ranks, dest_size) # Pack the cells and dofmap position for all dofs this process is distributing - out_cells = np.zeros(offsets[-1], dtype=np.int64) + out_cells = np.zeros(len(output_owners), dtype=np.int64) out_cells[insert_position] = input_cells - out_pos = np.zeros(offsets[-1], dtype=np.int32) + out_pos = np.zeros(len(output_owners), dtype=np.int32) out_pos[insert_position] = dofmap_pos - # Compute map from the data index sent to each process and the local number on the current process + # Compute map from the data index sent to each process and the local + # number on the current process proc_to_dof = np.zeros_like(input_cells, dtype=np.int32) proc_to_dof[insert_position] = np.arange(len(input_cells), dtype=np.int32) - del cum_pos, insert_position + del insert_position # Send sizes to create data structures for receiving from NeighAlltoAllv recv_size = np.zeros(len(source_ranks), dtype=np.int32) mesh_to_data_comm = comm.Create_dist_graph_adjacent( source_ranks.tolist(), dest_ranks.tolist(), reorder=False ) - mesh_to_data_comm.Neighbor_alltoall(out_size, recv_size) + mesh_to_data_comm.Neighbor_alltoall(dest_size, recv_size) # Prepare data-structures for receiving total_incoming = sum(recv_size) @@ -106,11 +90,11 @@ def send_dofmap_and_recv_values( inc_offsets[1:] = np.cumsum(recv_size) # Send data - s_msg = [out_cells, out_size, MPI.INT64_T] + s_msg = [out_cells, dest_size, MPI.INT64_T] r_msg = [inc_cells, recv_size, MPI.INT64_T] mesh_to_data_comm.Neighbor_alltoallv(s_msg, r_msg) - s_msg = [out_pos, out_size, MPI.INT32_T] + s_msg = [out_pos, dest_size, MPI.INT32_T] r_msg = [inc_pos, recv_size, MPI.INT32_T] mesh_to_data_comm.Neighbor_alltoallv(s_msg, r_msg) mesh_to_data_comm.Free() @@ -120,16 +104,16 @@ def send_dofmap_and_recv_values( # Map values based on input cells and dofmap local_cells = inc_cells - local_input_range[0] - values_to_distribute = values[dofmap_offsets[local_cells]+inc_pos] + values_to_distribute = values[dofmap_offsets[local_cells] + inc_pos] # Send input dofs back to owning process data_to_mesh_comm = comm.Create_dist_graph_adjacent( dest_ranks.tolist(), source_ranks.tolist(), reorder=False ) - incoming_global_dofs = np.zeros(sum(out_size), dtype=values.dtype) + 
incoming_global_dofs = np.zeros(sum(dest_size), dtype=values.dtype) s_msg = [values_to_distribute, recv_size, numpy_to_mpi[values.dtype.type]] - r_msg = [incoming_global_dofs, out_size, numpy_to_mpi[values.dtype.type]] + r_msg = [incoming_global_dofs, dest_size, numpy_to_mpi[values.dtype.type]] data_to_mesh_comm.Neighbor_alltoallv(s_msg, r_msg) # Sort incoming global dofs as they were inputted @@ -156,51 +140,26 @@ def send_and_recv_cell_perm( cell_owners: The rank to send the i-th entry of cells and perms to comm: Rank of comm to generate neighbourhood communicator from """ - dest_ranks = np.unique(cell_owners) + dest_ranks, dest_size = np.unique(cell_owners, return_counts=True) + dest_size = dest_size.astype(np.int32) mesh_to_data = comm.Create_dist_graph( [comm.rank], [len(dest_ranks)], dest_ranks.tolist(), reorder=False ) source, dest, _ = mesh_to_data.Get_dist_neighbors() - - # This becomes a (num_cells, num_dest_ranks) matrix where the (i,j) entry - # is True if cell i is owned by dest[j] - owners_transposed = cell_owners.reshape(-1, 1) - process_pos_indicator = (owners_transposed == np.asarray(dest)) - - # Compute number of cells owned by each rank - out_size = np.count_nonzero(process_pos_indicator, axis=0).astype(np.int32) - - # Compute send offset based of number of cells owned by each rank - offsets = np.zeros(len(out_size) + 1, dtype=np.intc) - offsets[1:] = np.cumsum(out_size) - assert offsets[-1] == len(cells) - - # As each cell can only be owned by one process, this returns what column - # the ith cell is owned by - proc_row, proc_col = np.nonzero(process_pos_indicator) - assert np.allclose(proc_row, np.arange(len(process_pos_indicator), dtype=np.int32)) - - # For a cell owned by process j, get its position in the list of cell owned by this process - cum_pos = np.cumsum(process_pos_indicator, axis=0) - - # Get the insert position (relative to process) for each dof. Subtract 0 as first occurence is equal to 1) due - # to the cumsum above - insert_position = cum_pos[proc_row, proc_col] - 1 - - # Compute aboslute insert position - insert_position += offsets[proc_col] + assert np.allclose(dest, dest_ranks) + insert_position = compute_insert_position(cell_owners, dest_ranks, dest_size) # Pack cells and permutations for sending out_cells = np.zeros_like(cells, dtype=np.int64) out_perm = np.zeros_like(perms, dtype=np.uint32) out_cells[insert_position] = cells out_perm[insert_position] = perms - del cum_pos, insert_position + del insert_position # Send sizes to create data structures for receiving from NeighAlltoAllv recv_size = np.zeros_like(source, dtype=np.int32) - mesh_to_data.Neighbor_alltoall(out_size, recv_size) + mesh_to_data.Neighbor_alltoall(dest_size, recv_size) # Prepare data-structures for receiving total_incoming = sum(recv_size) @@ -212,11 +171,11 @@ def send_and_recv_cell_perm( inc_offsets[1:] = np.cumsum(recv_size) # Send data - s_msg = [out_cells, out_size, MPI.INT64_T] + s_msg = [out_cells, dest_size, MPI.INT64_T] r_msg = [inc_cells, recv_size, MPI.INT64_T] mesh_to_data.Neighbor_alltoallv(s_msg, r_msg) - s_msg = [out_perm, out_size, MPI.UINT32_T] + s_msg = [out_perm, dest_size, MPI.UINT32_T] r_msg = [inc_perm, recv_size, MPI.UINT32_T] mesh_to_data.Neighbor_alltoallv(s_msg, r_msg) mesh_to_data.Free() @@ -240,58 +199,36 @@ def send_dofs_and_recv_values( input_array: Values for dofs array_start: The global starting index of `input_array`. 
""" - dest_ranks = np.unique(dofmap_owners) + dest_ranks, dest_size = np.unique(dofmap_owners, return_counts=True) + dest_size = dest_size.astype(np.int32) + dofmap_to_values = comm.Create_dist_graph( [comm.rank], [len(dest_ranks)], dest_ranks.tolist(), reorder=False ) source, dest, _ = dofmap_to_values.Get_dist_neighbors() - + assert np.allclose(dest_ranks, dest) # Compute amount of data to send to each process - # This becomes a (num_dofs, num_dest_ranks) matrix where the (i,j) entry - # is True if dof i is owned by dest_rank[j] - owners_transposed = dofmap_owners.reshape(-1, 1) - process_pos_indicator = (owners_transposed == dest_ranks) - - # Compute number of dofs owned by each rank - out_size = np.count_nonzero(process_pos_indicator, axis=0).astype(np.int32) - - # Compute send offset based of number of dofs owned by each rank - dofs_offsets = np.zeros(len(out_size) + 1, dtype=np.intc) - dofs_offsets[1:] = np.cumsum(out_size) - - # As each dof can only be owned by one process, this returns what column - # the ith dof is owned by - proc_row, proc_col = np.nonzero(process_pos_indicator) - assert len(proc_row) == len(proc_col) - assert np.allclose(proc_row, np.arange(len(process_pos_indicator), dtype=np.int32)) - - # For a dof owned by process j, get its position in the list of dofs owned by this process - cum_pos = np.cumsum(process_pos_indicator, axis=0) - - # Get the insert position (relative to process) for each dof. Subtract 0 as first occurence is equal to 1) due - # to the cumsum above - insert_position = cum_pos[proc_row, proc_col] - 1 - # Compute aboslute insert position - insert_position += dofs_offsets[proc_col] + insert_position = compute_insert_position(dofmap_owners, dest_ranks, dest_size) # Pack dofs for sending - out_dofs = np.zeros(dofs_offsets[-1], dtype=np.int64) + out_dofs = np.zeros(len(dofmap_owners), dtype=np.int64) out_dofs[insert_position] = input_dofmap - # Compute map from the data index sent to each process and the local number on the current process + # Compute map from the data index sent to each process and the local number on + # the current process proc_to_local = np.zeros_like(input_dofmap, dtype=np.int32) proc_to_local[insert_position] = np.arange(len(input_dofmap), dtype=np.int32) - del insert_position, cum_pos + del insert_position # Send sizes to create data structures for receiving from NeighAlltoAllv recv_size = np.zeros_like(source, dtype=np.int32) - dofmap_to_values.Neighbor_alltoall(out_size, recv_size) + dofmap_to_values.Neighbor_alltoall(dest_size, recv_size) # Send input dofs to processes holding input array inc_dofs = np.zeros(sum(recv_size), dtype=np.int64) - s_msg = [out_dofs, out_size, MPI.INT64_T] + s_msg = [out_dofs, dest_size, MPI.INT64_T] r_msg = [inc_dofs, recv_size, MPI.INT64_T] dofmap_to_values.Neighbor_alltoallv(s_msg, r_msg) dofmap_to_values.Free() @@ -302,7 +239,7 @@ def send_dofs_and_recv_values( values_to_dofmap = comm.Create_dist_graph_adjacent(dest, source, reorder=False) inc_values = np.zeros_like(out_dofs, dtype=input_array.dtype) s_msg_rev = [sending_values, recv_size, numpy_to_mpi[input_array.dtype.type]] - r_msg_rev = [inc_values, out_size, numpy_to_mpi[input_array.dtype.type]] + r_msg_rev = [inc_values, dest_size, numpy_to_mpi[input_array.dtype.type]] values_to_dofmap.Neighbor_alltoallv(s_msg_rev, r_msg_rev) values_to_dofmap.Free() diff --git a/src/adios4dolfinx/legacy_readers.py b/src/adios4dolfinx/legacy_readers.py index 43aea37..7bc64ff 100644 --- a/src/adios4dolfinx/legacy_readers.py +++ 
b/src/adios4dolfinx/legacy_readers.py @@ -9,19 +9,17 @@ from mpi4py import MPI +import adios2 import basix import dolfinx import numpy as np import numpy.typing as npt import ufl -from .adios2_helpers import (adios_to_numpy_dtype, read_array, - resolve_adios_scope) +from .adios2_helpers import adios_to_numpy_dtype, read_array, resolve_adios_scope from .comm_helpers import send_dofs_and_recv_values -from .utils import (compute_dofmap_pos, compute_local_range, - index_owner) +from .utils import compute_dofmap_pos, compute_insert_position, compute_local_range, index_owner -import adios2 adios2 = resolve_adios_scope(adios2) __all__ = [ @@ -39,7 +37,7 @@ def read_dofmap_legacy( engine: str, cells: npt.NDArray[np.int64], dof_pos: npt.NDArray[np.int32], - bs: int + bs: int, ) -> npt.NDArray[np.int64]: """ Read dofmap with given communicator, split in continuous chunks based on number of @@ -56,7 +54,8 @@ def read_dofmap_legacy( `input_dofmap.links(cells[i])[dof_pos[i]]` Returns: - The global dof index in the input data for each dof described by the (cells[i], dof_pos[i]) tuples. + The global dof index in the input data for each dof described by + the (cells[i], dof_pos[i]) tuples. .. note:: No MPI communication is done during this call @@ -89,7 +88,10 @@ def read_dofmap_legacy( ) else: d_offsets.SetSelection( - [[local_cell_range[0], 0], [local_cell_range[1] + 1 - local_cell_range[0], shape[1]]] + [ + [local_cell_range[0], 0], + [local_cell_range[1] + 1 - local_cell_range[0], shape[1]], + ] ) in_offsets = np.empty( (local_cell_range[1] + 1 - local_cell_range[0], shape[1]), @@ -105,13 +107,12 @@ def read_dofmap_legacy( if len(shape) == 1: cell_dofs.SetSelection([[in_offsets[0]], [in_offsets[-1] - in_offsets[0]]]) - in_dofmap = np.empty( - in_offsets[-1] - in_offsets[0], dtype=cell_dofs.Type().strip("_t") - ) + in_dofmap = np.empty(in_offsets[-1] - in_offsets[0], dtype=cell_dofs.Type().strip("_t")) else: cell_dofs.SetSelection([[in_offsets[0], 0], [in_offsets[-1] - in_offsets[0], shape[1]]]) in_dofmap = np.empty( - (in_offsets[-1] - in_offsets[0], shape[1]), dtype=cell_dofs.Type().strip("_t") + (in_offsets[-1] - in_offsets[0], shape[1]), + dtype=cell_dofs.Type().strip("_t"), ) assert shape[1] == 1 @@ -121,14 +122,17 @@ def read_dofmap_legacy( # Map xxxyyyzzz to xyzxyz mapped_dofmap = np.empty_like(in_dofmap) - for i in range(len(in_offsets)-1): - pos_begin, pos_end = in_offsets[i]-in_offsets[0], in_offsets[i+1]-in_offsets[0] + for i in range(len(in_offsets) - 1): + pos_begin, pos_end = ( + in_offsets[i] - in_offsets[0], + in_offsets[i + 1] - in_offsets[0], + ) dofs_i = in_dofmap[pos_begin:pos_end] assert (pos_end - pos_begin) % bs == 0 - num_dofs_local = int((pos_end-pos_begin)//bs) + num_dofs_local = int((pos_end - pos_begin) // bs) for k in range(bs): for j in range(num_dofs_local): - mapped_dofmap[int(pos_begin + j*bs+k)] = dofs_i[int(num_dofs_local*k+j)] + mapped_dofmap[int(pos_begin + j * bs + k)] = dofs_i[int(num_dofs_local * k + j)] # Extract dofmap data global_dofs = np.zeros_like(cells, dtype=np.int64) @@ -148,6 +152,7 @@ def send_cells_and_receive_dofmap_index( comm: MPI.Intracomm, source_ranks: npt.NDArray[np.int32], dest_ranks: npt.NDArray[np.int32], + dest_size: npt.NDArray[np.int32], output_owners: npt.NDArray[np.int32], input_cells: npt.NDArray[np.int64], dofmap_pos: npt.NDArray[np.int32], @@ -155,58 +160,41 @@ def send_cells_and_receive_dofmap_index( dofmap_path: str, xdofmap_path: str, engine: str, - bs: int + bs: int, ) -> npt.NDArray[np.int64]: """ Given a set of positions in 
input dofmap, give the global input index of this dofmap entry in input file. """ - # Compute amount of data to send to each process - owners_transposed = output_owners.reshape(-1, 1) - process_pos_indicator = (owners_transposed == np.asarray(dest_ranks)) - out_size = np.count_nonzero(process_pos_indicator, axis=0).astype(np.int32) - recv_size = np.zeros(len(source_ranks), dtype=np.int32) mesh_to_data_comm = comm.Create_dist_graph_adjacent( source_ranks.tolist(), dest_ranks.tolist(), reorder=False ) # Send sizes to create data structures for receiving from NeighAlltoAllv - mesh_to_data_comm.Neighbor_alltoall(out_size, recv_size) + mesh_to_data_comm.Neighbor_alltoall(dest_size, recv_size) - # Sort output for sending - offsets = np.zeros(len(out_size) + 1, dtype=np.intc) - offsets[1:] = np.cumsum(out_size) - out_cells = np.zeros(offsets[-1], dtype=np.int64) - out_pos = np.zeros(offsets[-1], dtype=np.int32) + # Sort output for sending and fill send data + out_cells = np.zeros(len(output_owners), dtype=np.int64) + out_pos = np.zeros(len(output_owners), dtype=np.int32) proc_to_dof = np.zeros_like(input_cells, dtype=np.int32) - - # Fill outgoing data - proc_row, proc_col = np.nonzero(process_pos_indicator) - assert np.allclose(proc_row, np.arange(len(process_pos_indicator), dtype=np.int32)) - cum_pos = np.cumsum(process_pos_indicator, axis=0) - insert_position = cum_pos[np.arange(len(proc_col), dtype=np.int32), proc_col] - 1 - insertion_array = offsets[proc_col] + insert_position + insertion_array = compute_insert_position(output_owners, dest_ranks, dest_size) out_cells[insertion_array] = input_cells out_pos[insertion_array] = dofmap_pos proc_to_dof[insertion_array] = np.arange(len(input_cells), dtype=np.int32) - del cum_pos, insert_position, insertion_array + del insertion_array # Prepare data-structures for receiving total_incoming = sum(recv_size) inc_cells = np.zeros(total_incoming, dtype=np.int64) inc_pos = np.zeros(total_incoming, dtype=np.intc) - # Compute incoming offset - inc_offsets = np.zeros(len(recv_size) + 1, dtype=np.intc) - inc_offsets[1:] = np.cumsum(recv_size) - # Send data - s_msg = [out_cells, out_size, MPI.INT64_T] + s_msg = [out_cells, dest_size, MPI.INT64_T] r_msg = [inc_cells, recv_size, MPI.INT64_T] mesh_to_data_comm.Neighbor_alltoallv(s_msg, r_msg) - s_msg = [out_pos, out_size, MPI.INT32_T] + s_msg = [out_pos, dest_size, MPI.INT32_T] r_msg = [inc_pos, recv_size, MPI.INT32_T] mesh_to_data_comm.Neighbor_alltoallv(s_msg, r_msg) mesh_to_data_comm.Free() @@ -220,16 +208,16 @@ def send_cells_and_receive_dofmap_index( engine, inc_cells, inc_pos, - bs + bs, ) # Send input dofs back to owning process data_to_mesh_comm = comm.Create_dist_graph_adjacent( dest_ranks.tolist(), source_ranks.tolist(), reorder=False ) - incoming_global_dofs = np.zeros(sum(out_size), dtype=np.int64) + incoming_global_dofs = np.zeros(sum(dest_size), dtype=np.int64) s_msg = [input_dofs, recv_size, MPI.INT64_T] - r_msg = [incoming_global_dofs, out_size, MPI.INT64_T] + r_msg = [incoming_global_dofs, dest_size, MPI.INT64_T] data_to_mesh_comm.Neighbor_alltoallv(s_msg, r_msg) # Sort incoming global dofs as they were inputted @@ -241,7 +229,6 @@ def send_cells_and_receive_dofmap_index( def read_mesh_geometry(io: adios2.ADIOS, infile: adios2.Engine, group: str): - for geometry_key in [f"{group}/geometry", f"{group}/coordinates"]: if geometry_key in io.AvailableVariables().keys(): break @@ -251,18 +238,21 @@ def read_mesh_geometry(io: adios2.ADIOS, infile: adios2.Engine, group: str): geometry = 
io.InquireVariable(geometry_key) shape = geometry.Shape() local_range = compute_local_range(MPI.COMM_WORLD, shape[0]) - geometry.SetSelection( - [[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]] - ) + geometry.SetSelection([[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]]) mesh_geometry = np.empty( - (local_range[1] - local_range[0], shape[1]), dtype=adios_to_numpy_dtype[geometry.Type()]) + (local_range[1] - local_range[0], shape[1]), + dtype=adios_to_numpy_dtype[geometry.Type()], + ) infile.Get(geometry, mesh_geometry, adios2.Mode.Sync) return mesh_geometry def read_mesh_from_legacy_h5( - comm: MPI.Intracomm, filename: pathlib.Path, group: str, cell_type: str = "tetrahedron" + comm: MPI.Intracomm, + filename: pathlib.Path, + group: str, + cell_type: str = "tetrahedron", ) -> dolfinx.mesh.Mesh: """ Read mesh from `h5`-file generated by legacy DOLFIN `HDF5File.write` or `XDMF.write_checkpoint`. @@ -292,9 +282,7 @@ def read_mesh_from_legacy_h5( topology = io.InquireVariable(f"{group}/topology") shape = topology.Shape() local_range = compute_local_range(MPI.COMM_WORLD, shape[0]) - topology.SetSelection( - [[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]] - ) + topology.SetSelection([[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]]) mesh_topology = np.empty( (local_range[1] - local_range[0], shape[1]), dtype=topology.Type().strip("_t") @@ -321,9 +309,7 @@ def read_mesh_from_legacy_h5( shape=(mesh_geometry.shape[1],), ) domain = ufl.Mesh(element) - return dolfinx.mesh.create_mesh( - MPI.COMM_WORLD, mesh_topology, mesh_geometry, domain - ) + return dolfinx.mesh.create_mesh(MPI.COMM_WORLD, mesh_topology, mesh_geometry, domain) def read_function_from_legacy_h5( @@ -333,7 +319,9 @@ def read_function_from_legacy_h5( group: str = "mesh", step: Optional[int] = None, ): - """Read function from a `h5`-file generated by legacy DOLFIN `HDF5File.write` or `XDMF.write_checkpoint`. + """ + Read function from a `h5`-file generated by legacy DOLFIN `HDF5File.write` + or `XDMF.write_checkpoint`. 
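+
+    Example:
+        A minimal sketch (assumes ``u.h5`` was written by legacy DOLFIN with
+        ``HDF5File.write(u, "/u")``; the group and file names are illustrative):
+
+        .. code-block:: python
+
+            mesh = read_mesh_from_legacy_h5(MPI.COMM_WORLD, pathlib.Path("u.h5"), "/mesh")
+            V = dolfinx.fem.FunctionSpace(mesh, ("Lagrange", 1))
+            u = dolfinx.fem.Function(V)
+            read_function_from_legacy_h5(mesh.comm, pathlib.Path("u.h5"), u, group="u")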
Args: comm : MPI communicator to distribute mesh over @@ -365,7 +353,7 @@ def read_function_from_legacy_h5( # 1.1 Compute mesh->input communicator num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global owners = index_owner(mesh.comm, input_cells, num_cells_global) - unique_owners = np.unique(owners) + unique_owners, owner_count = np.unique(owners, return_counts=True) # FIXME: In C++ use NBX to find neighbourhood _tmp_comm = mesh.comm.Create_dist_graph( [mesh.comm.rank], [len(unique_owners)], unique_owners, reorder=False @@ -389,6 +377,7 @@ def read_function_from_legacy_h5( comm, np.asarray(source, dtype=np.int32), np.asarray(dest, dtype=np.int32), + owner_count.astype(np.int32), owners, input_cells, dof_pos, @@ -396,7 +385,7 @@ def read_function_from_legacy_h5( f"/{group}/cell_dofs", f"/{group}/x_cell_dofs", "HDF5", - bs + bs, ) # ----------------------Step 3--------------------------------- @@ -408,8 +397,9 @@ def read_function_from_legacy_h5( # Read input data adios = adios2.ADIOS(comm) - local_array, starting_pos = read_array(adios, filename, f"/{group}/{vector_group}", "HDF5", - comm, legacy=True) + local_array, starting_pos = read_array( + adios, filename, f"/{group}/{vector_group}", "HDF5", comm, legacy=True + ) # Send global dof indices to correct input process, and receive value of given dof local_values = send_dofs_and_recv_values( diff --git a/src/adios4dolfinx/original_checkpoint.py b/src/adios4dolfinx/original_checkpoint.py new file mode 100644 index 0000000..c965761 --- /dev/null +++ b/src/adios4dolfinx/original_checkpoint.py @@ -0,0 +1,354 @@ +# Copyright (C) 2024 Jørgen Schartum Dokken +# +# This file is part of adios4dolfinx +# +# SPDX-License-Identifier: MIT + +from pathlib import Path + +from mpi4py import MPI + +import adios2 +import dolfinx +import numpy as np + +from .adios2_helpers import resolve_adios_scope +from .comm_helpers import numpy_to_mpi +from .structures import FunctionData, MeshData +from .utils import ( + compute_insert_position, + compute_local_range, + index_owner, + unroll_dofmap, + unroll_insert_position, +) +from .writers import write_function, write_mesh + +adios2 = resolve_adios_scope(adios2) + +__all__ = ["write_function_on_input_mesh", "write_mesh_input_order"] + + +def create_original_mesh_data(mesh: dolfinx.mesh.Mesh) -> MeshData: + """ + Store data locally on output process + """ + + # 1. 
Send cell indices owned by the current process to the process that owned them in the original input
+
+    # Get the input cell index for cells owned by this process
+    num_owned_cells = mesh.topology.index_map(mesh.topology.dim).size_local
+    original_cell_index = mesh.topology.original_cell_index[:num_owned_cells]
+
+    # Compute owner of cells on this process based on the original cell index
+    num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global
+    output_cell_owner = index_owner(mesh.comm, original_cell_index, num_cells_global)
+    local_cell_range = compute_local_range(mesh.comm, num_cells_global)
+
+    # Compute outgoing edges from the current process to the output processes
+    # Computes the number of cells sent to each process at the same time
+    cell_destinations, send_cells_per_proc = np.unique(output_cell_owner, return_counts=True)
+    cell_to_output_comm = mesh.comm.Create_dist_graph(
+        [mesh.comm.rank],
+        [len(cell_destinations)],
+        cell_destinations.tolist(),
+        reorder=False,
+    )
+    cell_sources, cell_dests, _ = cell_to_output_comm.Get_dist_neighbors()
+    assert np.allclose(cell_dests, cell_destinations)
+
+    # Compute number of receiving cells
+    recv_cells_per_proc = np.zeros_like(cell_sources, dtype=np.int32)
+    if len(send_cells_per_proc) == 0:
+        send_cells_per_proc = np.zeros(1, dtype=np.int32)
+    if len(recv_cells_per_proc) == 0:
+        recv_cells_per_proc = np.zeros(1, dtype=np.int32)
+    send_cells_per_proc = send_cells_per_proc.astype(np.int32)
+    cell_to_output_comm.Neighbor_alltoall(send_cells_per_proc, recv_cells_per_proc)
+    assert recv_cells_per_proc.sum() == local_cell_range[1] - local_cell_range[0]
+    # Pack and send cell indices (used for mapping topology dofmap later)
+    cell_insert_position = compute_insert_position(
+        output_cell_owner, cell_destinations, send_cells_per_proc
+    )
+    send_cells = np.empty_like(cell_insert_position, dtype=np.int64)
+    send_cells[cell_insert_position] = original_cell_index
+    recv_cells = np.empty(recv_cells_per_proc.sum(), dtype=np.int64)
+    send_cells_msg = [send_cells, send_cells_per_proc, MPI.INT64_T]
+    recv_cells_msg = [recv_cells, recv_cells_per_proc, MPI.INT64_T]
+    cell_to_output_comm.Neighbor_alltoallv(send_cells_msg, recv_cells_msg)
+    del send_cells_msg, recv_cells_msg, send_cells
+
+    # Map received cells to the local index
+    local_cell_index = recv_cells - local_cell_range[0]
+
+    # 2. Create dofmap based on original geometry indices and re-order in the
+    # same order as the original cell indices on the output process
+
+    # Get original node index for all nodes (including ghosts) and convert dofmap to these indices
+    original_node_index = mesh.geometry.input_global_indices
+    _, num_nodes_per_cell = mesh.geometry.dofmap.shape
+    local_geometry_dofmap = mesh.geometry.dofmap[:num_owned_cells, :]
+    global_geometry_dofmap = original_node_index[local_geometry_dofmap.reshape(-1)]
+
+    # Unroll insert position for geometry dofmap
+    dofmap_insert_position = unroll_insert_position(cell_insert_position, num_nodes_per_cell)
+
+    # Create and communicate connectivity in original geometry indices
+    send_geometry_dofmap = np.empty_like(dofmap_insert_position, dtype=np.int64)
+    send_geometry_dofmap[dofmap_insert_position] = global_geometry_dofmap
+    del global_geometry_dofmap
+    send_sizes_dofmap = send_cells_per_proc * num_nodes_per_cell
+    recv_sizes_dofmap = recv_cells_per_proc * num_nodes_per_cell
+    recv_geometry_dofmap = np.empty(recv_sizes_dofmap.sum(), dtype=np.int64)
+    send_geometry_dofmap_msg = [send_geometry_dofmap, send_sizes_dofmap, MPI.INT64_T]
+    recv_geometry_dofmap_msg = [recv_geometry_dofmap, recv_sizes_dofmap, MPI.INT64_T]
+    cell_to_output_comm.Neighbor_alltoallv(send_geometry_dofmap_msg, recv_geometry_dofmap_msg)
+    del send_geometry_dofmap_msg, recv_geometry_dofmap_msg
+
+    # Reshape dofmap and sort by original cell index
+    recv_dofmap = recv_geometry_dofmap.reshape(-1, num_nodes_per_cell)
+    sorted_recv_dofmap = np.empty_like(recv_dofmap)
+    sorted_recv_dofmap[local_cell_index] = recv_dofmap
+
+    # 3. Move geometry coordinates to input process
+    # Compute outgoing edges from current process and create neighbourhood communicator
+    # Also compute the number of outgoing nodes at the same time
+    num_owned_nodes = mesh.geometry.index_map().size_local
+    num_nodes_global = mesh.geometry.index_map().size_global
+    output_node_owner = index_owner(
+        mesh.comm, original_node_index[:num_owned_nodes], num_nodes_global
+    )
+
+    node_destinations, send_nodes_per_proc = np.unique(output_node_owner, return_counts=True)
+    send_nodes_per_proc = send_nodes_per_proc.astype(np.int32)
+    geometry_to_owner_comm = mesh.comm.Create_dist_graph(
+        [mesh.comm.rank],
+        [len(node_destinations)],
+        node_destinations.tolist(),
+        reorder=False,
+    )
+
+    node_sources, node_dests, _ = geometry_to_owner_comm.Get_dist_neighbors()
+    assert np.allclose(node_dests, node_destinations)
+
+    # Compute send node insert positions
+    send_nodes_position = compute_insert_position(
+        output_node_owner, node_destinations, send_nodes_per_proc
+    )
+    unrolled_nodes_position = unroll_insert_position(send_nodes_position, 3)
+
+    send_coordinates = np.empty_like(unrolled_nodes_position, dtype=mesh.geometry.x.dtype)
+    send_coordinates[unrolled_nodes_position] = mesh.geometry.x[:num_owned_nodes, :].reshape(-1)
+
+    # Send and receive geometry sizes
+    send_coordinate_sizes = (send_nodes_per_proc * 3).astype(np.int32)
+    recv_coordinate_sizes = np.zeros_like(node_sources, dtype=np.int32)
+    geometry_to_owner_comm.Neighbor_alltoall(send_coordinate_sizes, recv_coordinate_sizes)
+
+    # Send node coordinates
+    recv_coordinates = np.empty(recv_coordinate_sizes.sum(), dtype=mesh.geometry.x.dtype)
+    mpi_type = numpy_to_mpi[recv_coordinates.dtype.type]
+    send_coord_msg = [send_coordinates, send_coordinate_sizes, mpi_type]
+    recv_coord_msg = [recv_coordinates, recv_coordinate_sizes, mpi_type]
+    geometry_to_owner_comm.Neighbor_alltoallv(send_coord_msg, recv_coord_msg)
+    del send_coord_msg, recv_coord_msg
+
+    # Send node ordering for reordering the coordinates on output process
+    send_nodes = np.empty(num_owned_nodes, dtype=np.int64)
+    send_nodes[send_nodes_position] = original_node_index[:num_owned_nodes]
+
+    recv_indices = np.empty(recv_coordinate_sizes.sum() // 3, dtype=np.int64)
+    send_nodes_msg = [send_nodes, send_nodes_per_proc, MPI.INT64_T]
+    recv_nodes_msg = [recv_indices, recv_coordinate_sizes // 3, MPI.INT64_T]
+    geometry_to_owner_comm.Neighbor_alltoallv(send_nodes_msg, recv_nodes_msg)
+
+    # Compute local ordering of received nodes
+    local_node_range = compute_local_range(mesh.comm, num_nodes_global)
+    recv_indices -= local_node_range[0]
+
+    # Sort geometry based on input index and strip to gdim
+    gdim = mesh.geometry.dim
+    recv_nodes = recv_coordinates.reshape(-1, 3)
+    geometry = np.empty_like(recv_nodes)
+    geometry[recv_indices, :] = recv_nodes
+    geometry = geometry[:, :gdim].copy()
+    assert local_node_range[1] - local_node_range[0] == geometry.shape[0]
+    cmap = mesh.geometry.cmap
+    return MeshData(
+        local_geometry=geometry,
+        local_geometry_pos=local_node_range,
+        num_nodes_global=num_nodes_global,
+        local_topology=sorted_recv_dofmap,
+        local_topology_pos=local_cell_range,
+        num_cells_global=num_cells_global,
+        cell_type=mesh.topology.cell_name(),
+        degree=cmap.degree,
+        lagrange_variant=cmap.variant,
+    )
+
+
+def create_function_data_on_original_mesh(u: dolfinx.fem.Function) -> FunctionData:
+    """
+    Create data object to save with ADIOS2
+    """
+    mesh = u.function_space.mesh
+
+    # Compute which cells owned by the current process should be sent to which output process
+    # FIXME: Cache this
+    num_owned_cells = mesh.topology.index_map(mesh.topology.dim).size_local
+    original_cell_index = mesh.topology.original_cell_index[:num_owned_cells]
+
+    # Compute owner of cells on this process based on the original cell index
+    num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global
+    output_cell_owner = index_owner(mesh.comm, original_cell_index, num_cells_global)
+    local_cell_range = compute_local_range(mesh.comm, num_cells_global)
+
+    # Compute outgoing edges from the current process to the output processes
+    # Computes the number of cells sent to each process at the same time
+    cell_destinations, send_cells_per_proc = np.unique(output_cell_owner, return_counts=True)
+    send_cells_per_proc = send_cells_per_proc.astype(np.int32)
+    cell_to_output_comm = mesh.comm.Create_dist_graph(
+        [mesh.comm.rank],
+        [len(cell_destinations)],
+        cell_destinations.tolist(),
+        reorder=False,
+    )
+    cell_sources, cell_dests, _ = cell_to_output_comm.Get_dist_neighbors()
+    assert np.allclose(cell_dests, cell_destinations)
+
+    # Compute number of receiving cells
+    recv_cells_per_proc = np.zeros_like(cell_sources, dtype=np.int32)
+    send_cells_per_proc = send_cells_per_proc.astype(np.int32)
+    cell_to_output_comm.Neighbor_alltoall(send_cells_per_proc, recv_cells_per_proc)
+    assert recv_cells_per_proc.sum() == local_cell_range[1] - local_cell_range[0]
+
+    # Pack and send cell indices (used for mapping topology dofmap later)
+    cell_insert_position = compute_insert_position(
+        output_cell_owner, cell_destinations, send_cells_per_proc
+    )
+    send_cells = np.empty_like(cell_insert_position, dtype=np.int64)
+    send_cells[cell_insert_position] = original_cell_index
+    recv_cells = np.empty(recv_cells_per_proc.sum(), dtype=np.int64)
+    send_cells_msg = [send_cells, send_cells_per_proc, MPI.INT64_T]
+    recv_cells_msg = [recv_cells, recv_cells_per_proc, MPI.INT64_T]
+
cell_to_output_comm.Neighbor_alltoallv(send_cells_msg, recv_cells_msg) + del send_cells_msg, recv_cells_msg + + # Map received cells to the local index + local_cell_index = recv_cells - local_cell_range[0] + + # Pack and send cell permutation info + mesh.topology.create_entity_permutations() + cell_permutation_info = mesh.topology.get_cell_permutation_info()[:num_owned_cells] + send_perm = np.empty_like(send_cells, dtype=np.uint32) + send_perm[cell_insert_position] = cell_permutation_info + recv_perm = np.empty_like(recv_cells, dtype=np.uint32) + send_perm_msg = [send_perm, send_cells_per_proc, MPI.UINT32_T] + recv_perm_msg = [recv_perm, recv_cells_per_proc, MPI.UINT32_T] + cell_to_output_comm.Neighbor_alltoallv(send_perm_msg, recv_perm_msg) + cell_permutation_info = np.empty_like(recv_perm) + cell_permutation_info[local_cell_index] = recv_perm + + # 2. Extract function data (array is the same, keeping global indices from DOLFINx) + # Dofmap is moved by the original cell index similar to the mesh geometry dofmap + dofmap = u.function_space.dofmap + dmap = dofmap.list + num_dofs_per_cell = dmap.shape[1] + dofmap_bs = dofmap.bs + index_map_bs = dofmap.index_map_bs + + # Unroll dofmap for block size + unrolled_dofmap = unroll_dofmap(dofmap.list[:num_owned_cells, :], dofmap_bs) + dmap_loc = (unrolled_dofmap // index_map_bs).reshape(-1) + dmap_rem = (unrolled_dofmap % index_map_bs).reshape(-1) + + # Convert imap index to global index + imap_global = dofmap.index_map.local_to_global(dmap_loc) + dofmap_global = (imap_global * index_map_bs + dmap_rem).reshape(unrolled_dofmap.shape) + num_dofs_per_cell = dofmap_global.shape[1] + dofmap_insert_position = unroll_insert_position(cell_insert_position, num_dofs_per_cell) + + # Create and send array for global dofmap + send_function_dofmap = np.empty(len(dofmap_insert_position), dtype=np.int64) + send_function_dofmap[dofmap_insert_position] = dofmap_global.reshape(-1) + send_sizes_dofmap = send_cells_per_proc * num_dofs_per_cell + recv_size_dofmap = recv_cells_per_proc * num_dofs_per_cell + recv_function_dofmap = np.empty(recv_size_dofmap.sum(), dtype=np.int64) + cell_to_output_comm.Neighbor_alltoallv( + [send_function_dofmap, send_sizes_dofmap, MPI.INT64_T], + [recv_function_dofmap, recv_size_dofmap, MPI.INT64_T], + ) + + shaped_dofmap = recv_function_dofmap.reshape( + local_cell_range[1] - local_cell_range[0], num_dofs_per_cell + ).copy() + final_dofmap = np.empty_like(shaped_dofmap) + final_dofmap[local_cell_index] = shaped_dofmap + final_dofmap = final_dofmap.reshape(-1) + + # Get offsets of dofmap + num_cells_local = local_cell_range[1] - local_cell_range[0] + num_dofs_local_dmap = num_cells_local * num_dofs_per_cell + dofmap_imap = dolfinx.common.IndexMap(mesh.comm, num_dofs_local_dmap) + local_dofmap_offsets = np.arange(num_cells_local + 1, dtype=np.int64) + local_dofmap_offsets[:] *= num_dofs_per_cell + local_dofmap_offsets[:] += dofmap_imap.local_range[0] + + num_dofs_local = dofmap.index_map.size_local * dofmap.index_map_bs + num_dofs_global = dofmap.index_map.size_global * dofmap.index_map_bs + local_range = np.asarray(dofmap.index_map.local_range, dtype=np.int64) * dofmap.index_map_bs + return FunctionData( + cell_permutations=cell_permutation_info, + local_cell_range=local_cell_range, + num_cells_global=num_cells_global, + dofmap_array=final_dofmap, + dofmap_offsets=local_dofmap_offsets, + values=u.x.array[:num_dofs_local].copy(), + dof_range=local_range, + num_dofs_global=num_dofs_global, + dofmap_range=dofmap_imap.local_range, + 
global_dofs_in_dofmap=dofmap_imap.size_global, + name=u.name, + ) + + +def write_function_on_input_mesh( + u: dolfinx.fem.Function, + filename: Path | str, + engine: str = "BP4", + mode: adios2.Mode = adios2.Mode.Append, + time: float = 0.0, +): + """ + Write function checkpoint (to be read with the input mesh). + + Parameters: + u: The function to checkpoint + filename: The filename to write to + engine: The ADIOS2 engine to use + mode: The ADIOS2 mode to use (write or append) + time: Time-stamp associated with function at current write step + + """ + mesh = u.function_space.mesh + function_data = create_function_data_on_original_mesh(u) + fname = Path(filename) + write_function( + mesh.comm, + function_data, + fname, + engine, + mode, + time, + io_name="OriginalFunctionWriter", + ) + + +def write_mesh_input_order(mesh: dolfinx.mesh.Mesh, filename: Path | str, engine: str = "BP4"): + """ + Write mesh to checkpoint file in original input ordering + """ + + mesh_data = create_original_mesh_data(mesh) + fname = Path(filename) + write_mesh(mesh.comm, mesh_data, fname, engine, io_name="OriginalMeshWriter") diff --git a/src/adios4dolfinx/snapshot.py b/src/adios4dolfinx/snapshot.py new file mode 100644 index 0000000..c797377 --- /dev/null +++ b/src/adios4dolfinx/snapshot.py @@ -0,0 +1,57 @@ +# Copyright (C) 2024 Jørgen Schartum Dokken +# +# This file is part of adios4dolfinx +# +# SPDX-License-Identifier: MIT + +from pathlib import Path + +import adios2 +import dolfinx + +from .adios2_helpers import resolve_adios_scope + +adios2 = resolve_adios_scope(adios2) + +__all__ = [ + "snapshot_checkpoint", +] + + +def snapshot_checkpoint(uh: dolfinx.fem.Function, file: Path, mode: adios2.Mode): + """Read or write a snapshot checkpoint + + This checkpoint is only meant to be used on the same mesh during the same simulation. 
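+
+    Example:
+        A typical round-trip within a single run (sketch; ``u`` is an existing
+        :class:`dolfinx.fem.Function`):
+
+        .. code-block:: python
+
+            snapshot_checkpoint(u, Path("snapshot.bp"), adios2.Mode.Write)
+            # ... later, on the same mesh and communicator ...
+            snapshot_checkpoint(u, Path("snapshot.bp"), adios2.Mode.Read)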
+
+
+    :param uh: The function to write data from or read to
+    :param file: The file to write to or read from
+    :param mode: Either read or write
+    """
+    # Create ADIOS IO
+    adios = adios2.ADIOS(uh.function_space.mesh.comm)
+    io_name = "SnapshotCheckPoint"
+    io = adios.DeclareIO(io_name)
+    io.SetEngine("BP4")
+    if mode not in [adios2.Mode.Write, adios2.Mode.Read]:
+        raise ValueError(f"Got invalid mode {mode}")
+    adios_file = io.Open(str(file), mode)
+
+    if mode == adios2.Mode.Write:
+        dofmap = uh.function_space.dofmap
+        num_dofs_local = dofmap.index_map.size_local * dofmap.index_map_bs
+        local_dofs = uh.x.array[:num_dofs_local].copy()
+
+        # Write to file
+        adios_file.BeginStep()
+        dofs = io.DefineVariable("dofs", local_dofs, count=[num_dofs_local])
+        adios_file.Put(dofs, local_dofs, adios2.Mode.Sync)
+        adios_file.EndStep()
+    else:
+        adios_file.BeginStep()
+        in_variable = io.InquireVariable("dofs")
+        in_variable.SetBlockSelection(uh.function_space.mesh.comm.rank)
+        adios_file.Get(in_variable, uh.x.array, adios2.Mode.Sync)
+        adios_file.EndStep()
+        uh.x.scatter_forward()
+    adios_file.Close()
+    adios.RemoveIO(io_name)
diff --git a/src/adios4dolfinx/structures.py b/src/adios4dolfinx/structures.py
new file mode 100644
index 0000000..068987e
--- /dev/null
+++ b/src/adios4dolfinx/structures.py
@@ -0,0 +1,47 @@
+# Copyright (C) 2024 Jørgen Schartum Dokken
+#
+# This file is part of adios4dolfinx
+#
+# SPDX-License-Identifier: MIT
+
+
+from dataclasses import dataclass
+from typing import Tuple
+
+import numpy as np
+import numpy.typing as npt
+
+"""Internal library classes for storing mesh and function data"""
+__all__ = ["MeshData", "FunctionData"]
+
+
+@dataclass
+class MeshData:
+    # 2 dimensional array of node coordinates
+    local_geometry: npt.NDArray[np.floating]
+    local_geometry_pos: Tuple[int, int]  # Insert range on current process for geometry nodes
+    num_nodes_global: int  # Number of nodes in global geometry array
+
+    local_topology: npt.NDArray[np.int64]  # 2 dimensional connectivity array for mesh topology
+    # Insert range on current process for topology
+    local_topology_pos: Tuple[int, int]
+    num_cells_global: int  # Number of cells in global topology
+
+    cell_type: str
+    degree: int
+    lagrange_variant: int
+
+
+@dataclass
+class FunctionData:
+    cell_permutations: npt.NDArray[np.uint32]  # Cell permutations for dofmap
+    local_cell_range: Tuple[int, int]  # Range of cells on current process
+    num_cells_global: int  # Number of cells in global topology
+    dofmap_array: npt.NDArray[np.int64]  # Local function dofmap (using global indices)
+    dofmap_offsets: npt.NDArray[np.int64]  # Global dofmap offsets
+    dofmap_range: Tuple[int, int]  # Range of dofmap on current process
+    global_dofs_in_dofmap: int  # Number of entries in global dofmap
+    values: npt.NDArray[np.floating]  # Local function values
+    dof_range: Tuple[int, int]  # Range of local function values
+    num_dofs_global: int  # Number of global function values
+    name: str  # Name of function
diff --git a/src/adios4dolfinx/utils.py b/src/adios4dolfinx/utils.py
index 9fc1468..7b13bc8 100644
--- a/src/adios4dolfinx/utils.py
+++ b/src/adios4dolfinx/utils.py
@@ -4,7 +4,19 @@
 #
 # SPDX-License-Identifier: MIT
 
-__all__ = ["compute_local_range", "index_owner", "compute_dofmap_pos", "unroll_dofmap"]
+"""
+Vectorized numpy operations used internally in adios4dolfinx
+"""
+
+
+__all__ = [
+    "compute_local_range",
+    "index_owner",
+    "compute_dofmap_pos",
+    "unroll_dofmap",
+    "compute_insert_position",
+    "unroll_insert_position",
+]
 
 from typing import Tuple, Union
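A minimal round trip with the snapshot checkpoint defined in `snapshot.py` above; this is a sketch assuming the module layout introduced in this patch, with placeholder mesh, function space, and file name:

```python
from pathlib import Path

from mpi4py import MPI

import adios2
import dolfinx
import numpy as np

from adios4dolfinx import snapshot_checkpoint
from adios4dolfinx.adios2_helpers import resolve_adios_scope

adios2 = resolve_adios_scope(adios2)

# Write the owned dof values of u to a BP4 file
mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 4, 4)
V = dolfinx.fem.functionspace(mesh, ("Lagrange", 1))
u = dolfinx.fem.Function(V)
u.interpolate(lambda x: x[0] + 2 * x[1])
snapshot_checkpoint(u, Path("snapshot.bp"), adios2.Mode.Write)

# Restore into a second function on the same mesh and space; the read uses a
# block selection per rank, so this only works with the same partitioning
v = dolfinx.fem.Function(V)
snapshot_checkpoint(v, Path("snapshot.bp"), adios2.Mode.Read)
assert np.allclose(u.x.array, v.x.array)
```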
 from mpi4py import MPI
 
@@ -17,6 +29,67 @@
 valid_real_types = Union[np.float32, np.float64]
 
 
+def compute_insert_position(
+    data_owner: npt.NDArray[np.int32],
+    destination_ranks: npt.NDArray[np.int32],
+    out_size: npt.NDArray[np.int32],
+) -> npt.NDArray[np.int32]:
+    """
+    Given a list of ranks, compute the local insert position for each rank in a list
+    sorted by destination ranks. This function is used for packing data from a
+    given process to its destination processes.
+
+    Example:
+
+        .. highlight:: python
+        .. code-block:: python
+
+            data_owner = [0, 1, 1, 0, 2, 3]
+            destination_ranks = [2, 0, 3, 1]
+            out_size = [1, 2, 1, 2]
+            insert_position = compute_insert_position(data_owner, destination_ranks, out_size)
+
+        Insert position is then ``[1, 4, 5, 2, 0, 3]``
+    """
+    process_pos_indicator = data_owner.reshape(-1, 1) == destination_ranks
+
+    # Compute offsets for insertion based on input size
+    send_offsets = np.zeros(len(out_size) + 1, dtype=np.intc)
+    send_offsets[1:] = np.cumsum(out_size)
+    assert send_offsets[-1] == len(data_owner)
+
+    # Compute local insert index on each process
+    proc_row, proc_col = np.nonzero(process_pos_indicator)
+    cum_pos = np.cumsum(process_pos_indicator, axis=0)
+    insert_position = cum_pos[proc_row, proc_col] - 1
+
+    # Add process offset for each local index
+    insert_position += send_offsets[proc_col]
+    return insert_position
+
+
+def unroll_insert_position(
+    insert_position: npt.NDArray[np.int32], block_size: int
+) -> npt.NDArray[np.int32]:
+    """
+    Unroll insert position by a block size
+
+    Example:
+
+        .. highlight:: python
+        .. code-block:: python
+
+            insert_position = [1, 4, 5, 2, 0, 3]
+            unrolled_ip = unroll_insert_position(insert_position, 3)
+
+        where ``unrolled_ip = [3, 4, 5, 12, 13, 14, 15, 16, 17, 6, 7, 8, 0, 1, 2, 9, 10, 11]``
+    """
+    unrolled_ip = np.repeat(insert_position, block_size) * block_size
+    unrolled_ip += np.tile(np.arange(block_size), len(insert_position))
+    return unrolled_ip
+
+
 def compute_local_range(comm: MPI.Intracomm, N: np.int64):
     """
     Divide a set of `N` objects into `M` partitions, where `M` is
@@ -54,9 +127,9 @@ def index_owner(
     r = N % size
 
     owner = np.empty_like(indices, dtype=np.int32)
-    owner[indices < r * n + 1] = indices[indices < r * n + 1] // (n + 1)
-    owner[indices >= r * n + 1] = r + (indices[indices >= r * n + 1] - r * (n + 1)) // n
-
+    inc_remainder = indices < (n + 1) * r
+    owner[inc_remainder] = indices[inc_remainder] // (n + 1)
+    owner[~inc_remainder] = r + (indices[~inc_remainder] - r * (n + 1)) // n
     return owner
 
 
@@ -67,8 +140,7 @@ def unroll_dofmap(dofs: npt.NDArray[np.int32], bs: int) -> npt.NDArray[np.int32]
     is of size `(num_cells, bs*num_dofs_per_cell)`
     """
     num_cells, num_dofs_per_cell = dofs.shape
-    unrolled_dofmap = np.repeat(dofs, bs).reshape(
-        num_cells, num_dofs_per_cell * bs) * bs
+    unrolled_dofmap = np.repeat(dofs, bs).reshape(num_cells, num_dofs_per_cell * bs) * bs
     unrolled_dofmap += np.tile(np.arange(bs), num_dofs_per_cell)
     return unrolled_dofmap
 
@@ -93,15 +165,15 @@ def compute_dofmap_pos(
     local_cell = np.empty(
         num_owned_dofs, dtype=np.int32
     )  # Local cell index for each dof owned by process
-    dof_pos = np.empty(
-        num_owned_dofs, dtype=np.int32
-    )  # Position in dofmap for said dof
+    dof_pos = np.empty(num_owned_dofs, dtype=np.int32)  # Position in dofmap for said dof
     unrolled_dofmap = unroll_dofmap(dofs[:num_owned_cells, :], dofmap_bs)
     markers = unrolled_dofmap < num_owned_dofs
     local_indices = np.broadcast_to(np.arange(markers.shape[1]), markers.shape)
 
     cell_indicator = np.broadcast_to(
-        
np.arange(num_owned_cells, dtype=np.int32).reshape(-1, 1), (num_owned_cells, markers.shape[1])) + np.arange(num_owned_cells, dtype=np.int32).reshape(-1, 1), + (num_owned_cells, markers.shape[1]), + ) indicator = unrolled_dofmap[markers].reshape(-1) local_cell[indicator] = cell_indicator[markers].reshape(-1) dof_pos[indicator] = local_indices[markers].reshape(-1) diff --git a/src/adios4dolfinx/writers.py b/src/adios4dolfinx/writers.py new file mode 100644 index 0000000..a83dc34 --- /dev/null +++ b/src/adios4dolfinx/writers.py @@ -0,0 +1,161 @@ +# Copyright (C) 2024 Jørgen Schartum Dokken +# +# This file is part of adios4dolfinx +# +# SPDX-License-Identifier: MIT + + +from pathlib import Path + +from mpi4py import MPI + +import adios2 +import numpy as np + +from .adios2_helpers import resolve_adios_scope +from .structures import FunctionData, MeshData + +adios2 = resolve_adios_scope(adios2) + + +def write_mesh( + comm: MPI.Intracomm, + mesh: MeshData, + filename: Path, + engine: str = "BP4", + mode: adios2.Mode = adios2.Mode.Write, + io_name: str = "MeshWriter", +): + """ + Write a mesh to file using ADIOS2 + + Parameters: + comm: MPI communicator used in storage + mesh: Internal data structure for the mesh data to save to file + filename: Path to file to write to + engine: ADIOS2 engine to use + mode: ADIOS2 mode to use (write or append) + io_name: Internal name used for the ADIOS IO object + """ + + gdim = mesh.local_geometry.shape[1] + adios = adios2.ADIOS(comm) + # TODO: add context manager here? + io = adios.DeclareIO(io_name) + io.SetEngine(engine) + outfile = io.Open(str(filename), mode) + + # Write geometry + pointvar = io.DefineVariable( + "Points", + mesh.local_geometry, + shape=[mesh.num_nodes_global, gdim], + start=[mesh.local_geometry_pos[0], 0], + count=[mesh.local_geometry_pos[1] - mesh.local_geometry_pos[0], gdim], + ) + outfile.Put(pointvar, mesh.local_geometry, adios2.Mode.Sync) + + # Write celltype + io.DefineAttribute("CellType", mesh.cell_type) + + # Write basix properties + io.DefineAttribute("Degree", np.array([mesh.degree], dtype=np.int32)) + io.DefineAttribute("LagrangeVariant", np.array([mesh.lagrange_variant], dtype=np.int32)) + + # Write topology + num_dofs_per_cell = mesh.local_topology.shape[1] + dvar = io.DefineVariable( + "Topology", + mesh.local_topology, + shape=[mesh.num_cells_global, num_dofs_per_cell], + start=[mesh.local_topology_pos[0], 0], + count=[ + mesh.local_topology_pos[1] - mesh.local_topology_pos[0], + num_dofs_per_cell, + ], + ) + + outfile.Put(dvar, mesh.local_topology) + outfile.PerformPuts() + outfile.EndStep() + outfile.Close() + assert adios.RemoveIO(io_name) + + +def write_function( + comm: MPI.Intracomm, + u: FunctionData, + filename: Path, + engine: str = "BP4", + mode: adios2.Mode = adios2.Mode.Append, + time: float = 0.0, + io_name: str = "FunctionWriter", +): + """ + Write a function to file using ADIOS2 + + Parameters: + comm: MPI communicator used in storage + u: Internal data structure for the function data to save to file + filename: Path to file to write to + engine: ADIOS2 engine to use + mode: ADIOS2 mode to use (write or append) + time: Time stamp associated with function + io_name: Internal name used for the ADIOS IO object + """ + adios = adios2.ADIOS(comm) + # TODO: add context manager here? 
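+    # Layout note: each variable below is declared with its global shape, and
+    # every rank Puts a contiguous slice [start, start + count) of it, so no
+    # data needs to be exchanged between ranks before writing. The time stamp
+    # is a global array of shape [1] that only rank 0 contributes to.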
+ io = adios.DeclareIO(io_name) + io.SetEngine(engine) + outfile = io.Open(str(filename), mode) + + # Add mesh permutations + pvar = io.DefineVariable( + "CellPermutations", + u.cell_permutations, + shape=[u.num_cells_global], + start=[u.local_cell_range[0]], + count=[u.local_cell_range[1] - u.local_cell_range[0]], + ) + outfile.Put(pvar, u.cell_permutations) + dofmap_var = io.DefineVariable( + f"{u.name}_dofmap", + u.dofmap_array, + shape=[u.global_dofs_in_dofmap], + start=[u.dofmap_range[0]], + count=[u.dofmap_range[1] - u.dofmap_range[0]], + ) + outfile.Put(dofmap_var, u.dofmap_array) + + xdofmap_var = io.DefineVariable( + f"{u.name}_XDofmap", + u.dofmap_offsets, + shape=[u.num_cells_global + 1], + start=[u.local_cell_range[0]], + count=[u.local_cell_range[1] - u.local_cell_range[0] + 1], + ) + outfile.Put(xdofmap_var, u.dofmap_offsets) + + val_var = io.DefineVariable( + f"{u.name}_values", + u.values, + shape=[u.num_dofs_global], + start=[u.dof_range[0]], + count=[u.dof_range[1] - u.dof_range[0]], + ) + outfile.Put(val_var, u.values) + + # Add time step to file + t_arr = np.array([time], dtype=np.float64) + time_var = io.DefineVariable( + f"{u.name}_time", + t_arr, + shape=[1], + start=[0], + count=[1 if comm.rank == 0 else 0], + ) + outfile.Put(time_var, t_arr) + outfile.PerformPuts() + outfile.EndStep() + outfile.Close() + assert adios.RemoveIO(io_name) diff --git a/tests/create_legacy_checkpoint.py b/tests/create_legacy_checkpoint.py index 2c0842b..07a8fb7 100644 --- a/tests/create_legacy_checkpoint.py +++ b/tests/create_legacy_checkpoint.py @@ -11,15 +11,19 @@ import argparse import pathlib +from importlib.metadata import version -import numpy as np from mpi4py import MPI -import adios4dolfinx + import dolfinx -from importlib.metadata import version +import numpy as np + +import adios4dolfinx a4d_version = version("adios4dolfinx") -assert a4d_version < "0.7.2", f"Creating a legacy checkpoint requires adios4dolfinx < 0.7.2, you have {a4d_version}." +assert ( + a4d_version < "0.7.2" +), f"Creating a legacy checkpoint requires adios4dolfinx < 0.7.2, you have {a4d_version}." 
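For orientation, the writers above combine with the input-ordering wrappers into the following round trip; a sketch with placeholder mesh, space, and file names, using the argument order at this point in the series (a later patch in this series moves the filename to the first argument of the read and write functions):

```python
from mpi4py import MPI

import dolfinx

import adios4dolfinx

mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 8, 8)
V = dolfinx.fem.functionspace(mesh, ("Lagrange", 2))
u = dolfinx.fem.Function(V, name="u")
u.interpolate(lambda x: x[0] * x[1])

# Store mesh and function in the original input ordering
adios4dolfinx.write_mesh_input_order(mesh, "checkpoint.bp")
adios4dolfinx.write_function_on_input_mesh(u, "checkpoint.bp", time=0.0)

# Restore, possibly on a different number of MPI ranks; the function name
# selects the dofmap/values/time variables in the file
mesh2 = adios4dolfinx.read_mesh(
    MPI.COMM_WORLD, "checkpoint.bp", "BP4", dolfinx.mesh.GhostMode.shared_facet
)
V2 = dolfinx.fem.functionspace(mesh2, ("Lagrange", 2))
u2 = dolfinx.fem.Function(V2, name="u")
adios4dolfinx.read_function(u2, "checkpoint.bp", time=0.0)
```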
def f(x): @@ -39,7 +43,9 @@ def write_checkpoint(filename, mesh, el, f): def verify_checkpoint(filename, el, f): - mesh = adios4dolfinx.read_mesh(MPI.COMM_WORLD, filename, "BP4", dolfinx.mesh.GhostMode.shared_facet) + mesh = adios4dolfinx.read_mesh( + MPI.COMM_WORLD, filename, "BP4", dolfinx.mesh.GhostMode.shared_facet + ) V = dolfinx.fem.FunctionSpace(mesh, el) uh = dolfinx.fem.Function(V, dtype=np.float64) adios4dolfinx.read_function(uh, filename) @@ -51,9 +57,7 @@ def verify_checkpoint(filename, el, f): if __name__ == "__main__": - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--output-dir", type=str, default="legacy_checkpoint", dest="dir") inputs = parser.parse_args() diff --git a/tests/create_legacy_data.py b/tests/create_legacy_data.py index 7008592..ec79644 100644 --- a/tests/create_legacy_data.py +++ b/tests/create_legacy_data.py @@ -24,7 +24,7 @@ def create_reference_data( function_name: str, family: str, degree: int, - function_name_vec: str + function_name_vec: str, ) -> dolfin.Function: mesh = dolfin.UnitCubeMesh(1, 1, 1) V = dolfin.FunctionSpace(mesh, family, degree) @@ -45,18 +45,10 @@ def create_reference_data( with dolfin.XDMFFile(mesh.mpi_comm(), str(xdmf_file)) as xdmf: xdmf.write(mesh) - xdmf.write_checkpoint( - v0, function_name, 0, dolfin.XDMFFile.Encoding.HDF5, append=True - ) - xdmf.write_checkpoint( - w0, function_name_vec, 0, dolfin.XDMFFile.Encoding.HDF5, append=True - ) - xdmf.write_checkpoint( - v1, function_name, 1, dolfin.XDMFFile.Encoding.HDF5, append=True - ) - xdmf.write_checkpoint( - w1, function_name_vec, 1, dolfin.XDMFFile.Encoding.HDF5, append=True - ) + xdmf.write_checkpoint(v0, function_name, 0, dolfin.XDMFFile.Encoding.HDF5, append=True) + xdmf.write_checkpoint(w0, function_name_vec, 0, dolfin.XDMFFile.Encoding.HDF5, append=True) + xdmf.write_checkpoint(v1, function_name, 1, dolfin.XDMFFile.Encoding.HDF5, append=True) + xdmf.write_checkpoint(w1, function_name_vec, 1, dolfin.XDMFFile.Encoding.HDF5, append=True) with dolfin.XDMFFile(mesh.mpi_comm(), "test.xdmf") as xdmf: xdmf.write(mesh) @@ -71,7 +63,7 @@ def verify_hdf5( function_name: str, family: str, degree: int, - function_name_vec: str + function_name_vec: str, ): mesh = dolfin.Mesh() with dolfin.HDF5File(mesh.mpi_comm(), str(h5_file), "r") as hdf: @@ -97,7 +89,7 @@ def verify_xdmf( function_name: str, family: str, degree: int, - function_name_vec: str + function_name_vec: str, ): mesh = dolfin.Mesh() with dolfin.XDMFFile(mesh.mpi_comm(), str(xdmf_file)) as xdmf: @@ -122,9 +114,7 @@ def verify_xdmf( if __name__ == "__main__": - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--family", type=str, default="DG") parser.add_argument("--degree", type=int, default=2) parser.add_argument("--output-dir", type=str, default="legacy", dest="dir") @@ -145,14 +135,28 @@ def verify_xdmf( inputs.f_name, inputs.family, inputs.degree, - inputs.f_name_vec - + inputs.f_name_vec, ) verify_hdf5( - v0_ref, w0_ref, h5_filename, inputs.name, inputs.f_name, inputs.family, inputs.degree, inputs.f_name_vec, + v0_ref, + w0_ref, + h5_filename, + inputs.name, + inputs.f_name, + inputs.family, + inputs.degree, + inputs.f_name_vec, ) verify_xdmf( - v0_ref, w0_ref, v1_ref, w1_ref, xdmf_filename, inputs.f_name, 
inputs.family, inputs.degree, inputs.f_name_vec, + v0_ref, + w0_ref, + v1_ref, + w1_ref, + xdmf_filename, + inputs.f_name, + inputs.family, + inputs.degree, + inputs.f_name_vec, ) diff --git a/tests/test_checkpointing.py b/tests/test_checkpointing.py index c92b21d..23c5508 100644 --- a/tests/test_checkpointing.py +++ b/tests/test_checkpointing.py @@ -1,19 +1,32 @@ import itertools +from mpi4py import MPI + import basix import basix.ufl import dolfinx import numpy as np import pytest -from mpi4py import MPI -from .test_utils import read_function, write_function, get_dtype, read_function_time_dep, write_function_time_dep +from .test_utils import ( + get_dtype, + read_function, + read_function_time_dep, + write_function, + write_function_time_dep, +) dtypes = [np.float64, np.float32] # Mesh geometry dtypes write_comm = [MPI.COMM_SELF, MPI.COMM_WORLD] # Communicators for creating mesh -two_dimensional_cell_types = [dolfinx.mesh.CellType.triangle, dolfinx.mesh.CellType.quadrilateral] -three_dimensional_cell_types = [dolfinx.mesh.CellType.tetrahedron, dolfinx.mesh.CellType.hexahedron] +two_dimensional_cell_types = [ + dolfinx.mesh.CellType.triangle, + dolfinx.mesh.CellType.quadrilateral, +] +three_dimensional_cell_types = [ + dolfinx.mesh.CellType.tetrahedron, + dolfinx.mesh.CellType.hexahedron, +] two_dim_combinations = itertools.product(dtypes, two_dimensional_cell_types, write_comm) three_dim_combinations = itertools.product(dtypes, three_dimensional_cell_types, write_comm) @@ -34,25 +47,30 @@ def mesh_3D(request): return mesh -@pytest.mark.parametrize("complex", [True, False]) +@pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_P_2D(read_comm, family, degree, complex, mesh_2D): +def test_read_write_P_2D(read_comm, family, degree, is_complex, mesh_2D): mesh = mesh_2D - f_dtype = get_dtype(mesh.geometry.x.dtype, complex) + f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) - el = basix.ufl.element(family, - mesh.ufl_cell().cellname(), - degree, - basix.LagrangeVariant.gll_warped, - shape=(mesh.geometry.dim, ), - dtype=mesh.geometry.x.dtype) + el = basix.ufl.element( + family, + mesh.ufl_cell().cellname(), + degree, + basix.LagrangeVariant.gll_warped, + shape=(mesh.geometry.dim,), + dtype=mesh.geometry.x.dtype, + ) def f(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) - values[0] = np.full(x.shape[1], np.pi) + x[0] + x[1] * 1j - values[1] = x[0] + 3j * x[1] + values[0] = np.full(x.shape[1], np.pi) + x[0] + values[1] = x[0] + if is_complex: + values[0] += 1j * x[1] + values[1] -= 3j * x[1] return values hash = write_function(mesh, el, f, f_dtype) @@ -60,24 +78,29 @@ def f(x): read_function(read_comm, el, f, hash, f_dtype) -@pytest.mark.parametrize("complex", [True, False]) +@pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_P_3D(read_comm, family, degree, complex, mesh_3D): +def test_read_write_P_3D(read_comm, family, degree, is_complex, mesh_3D): mesh = mesh_3D - f_dtype = get_dtype(mesh.geometry.x.dtype, complex) - el = basix.ufl.element(family, - mesh.ufl_cell().cellname(), - degree, - basix.LagrangeVariant.gll_warped, - shape=(mesh.geometry.dim, )) + f_dtype = get_dtype(mesh.geometry.x.dtype, 
is_complex) + el = basix.ufl.element( + family, + mesh.ufl_cell().cellname(), + degree, + basix.LagrangeVariant.gll_warped, + shape=(mesh.geometry.dim,), + ) def f(x): values = np.empty((3, x.shape[1]), dtype=f_dtype) - values[0] = np.pi + x[0] + 2j*x[2] + values[0] = np.pi + x[0] values[1] = x[1] + 2 * x[0] - values[2] = 1j*x[1] + np.cos(x[2]) + values[2] = np.cos(x[2]) + if is_complex: + values[0] -= 2j * x[2] + values[2] += 1j * x[1] return values hash = write_function(mesh, el, f, f_dtype) @@ -86,31 +109,39 @@ def f(x): read_function(read_comm, el, f, hash, f_dtype) -@pytest.mark.parametrize("complex", [True, False]) +@pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_P_2D_time(read_comm, family, degree, complex, mesh_2D): +def test_read_write_P_2D_time(read_comm, family, degree, is_complex, mesh_2D): mesh = mesh_2D - f_dtype = get_dtype(mesh.geometry.x.dtype, complex) + f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) - el = basix.ufl.element(family, - mesh.ufl_cell().cellname(), - degree, - basix.LagrangeVariant.gll_warped, - shape=(mesh.geometry.dim, ), - dtype=mesh.geometry.x.dtype) + el = basix.ufl.element( + family, + mesh.ufl_cell().cellname(), + degree, + basix.LagrangeVariant.gll_warped, + shape=(mesh.geometry.dim,), + dtype=mesh.geometry.x.dtype, + ) def f0(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) - values[0] = np.full(x.shape[1], np.pi) + x[0] + x[1] * 1j - values[1] = x[0] + 3j * x[1] + values[0] = np.full(x.shape[1], np.pi) + x[0] + values[1] = x[0] + if is_complex: + values[0] += x[1] * 1j + values[1] -= 3j * x[1] return values def f1(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) - values[0] = 2*np.full(x.shape[1], np.pi) + x[0] + x[1] * 1j - values[1] = -x[0] + 3j * x[1] + 2*x[1] + values[0] = 2 * np.full(x.shape[1], np.pi) + x[0] + values[1] = -x[0] + 2 * x[1] + if is_complex: + values[0] += x[1] * 1j + values[1] += 3j * x[1] return values t0 = 0.8 @@ -120,31 +151,40 @@ def f1(x): read_function_time_dep(read_comm, el, f0, f1, t0, t1, hash, f_dtype) -@pytest.mark.parametrize("complex", [True, False]) +@pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_P_3D_time(read_comm, family, degree, complex, mesh_3D): +def test_read_write_P_3D_time(read_comm, family, degree, is_complex, mesh_3D): mesh = mesh_3D - f_dtype = get_dtype(mesh.geometry.x.dtype, complex) - el = basix.ufl.element(family, - mesh.ufl_cell().cellname(), - degree, - basix.LagrangeVariant.gll_warped, - shape=(mesh.geometry.dim, )) + f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) + el = basix.ufl.element( + family, + mesh.ufl_cell().cellname(), + degree, + basix.LagrangeVariant.gll_warped, + shape=(mesh.geometry.dim,), + ) def f(x): values = np.empty((3, x.shape[1]), dtype=f_dtype) - values[0] = np.pi + x[0] + 2j*x[2] + values[0] = np.pi + x[0] values[1] = x[1] + 2 * x[0] - values[2] = 1j*x[1] + np.cos(x[2]) + values[2] = np.cos(x[2]) + if is_complex: + values[0] += 2j * x[2] + values[2] += 5j * x[1] return values def g(x): values = np.empty((3, x.shape[1]), dtype=f_dtype) - values[0] = x[0] + np.pi * 2j*x[2] - values[1] = 1j*x[2] + 2 * x[0] - values[2] = x[0] + 1j*np.cos(x[1]) + values[0] = x[0] + 
values[1] = 2 * x[0] + values[2] = x[0] + if is_complex: + values[0] += np.pi * 2j * x[2] + values[1] += 1j * x[2] + values[2] += 1j * np.cos(x[1]) return values t0 = 0.1 diff --git a/tests/test_checkpointing_vector.py b/tests/test_checkpointing_vector.py index 42b30db..a22d95e 100644 --- a/tests/test_checkpointing_vector.py +++ b/tests/test_checkpointing_vector.py @@ -49,21 +49,22 @@ def non_simplex_mesh_3D(request): return mesh -@pytest.mark.parametrize("complex", [True, False]) +@pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["N1curl", "RT"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_2D(read_comm, family, degree, complex, simplex_mesh_2D): +def test_read_write_2D(read_comm, family, degree, is_complex, simplex_mesh_2D): mesh = simplex_mesh_2D - f_dtype = get_dtype(mesh.geometry.x.dtype, complex) - el = basix.ufl.element(family, - mesh.ufl_cell().cellname(), - degree) + f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) + el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) def f(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) - values[0] = np.full(x.shape[1], np.pi) + x[0] + 2j*x[1] - values[1] = x[1] + 2j*x[0] + values[0] = np.full(x.shape[1], np.pi) + x[0] + values[1] = x[1] + if is_complex: + values[0] += 2j * x[1] + values[1] += 2j * x[0] return values hash = write_function(mesh, el, f, f_dtype) @@ -71,43 +72,46 @@ def f(x): read_function(read_comm, el, f, hash, f_dtype) -@pytest.mark.parametrize("complex", [True, False]) +@pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["N1curl", "RT"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_3D(read_comm, family, degree, complex, simplex_mesh_3D): +def test_read_write_3D(read_comm, family, degree, is_complex, simplex_mesh_3D): mesh = simplex_mesh_3D - f_dtype = get_dtype(mesh.geometry.x.dtype, complex) - el = basix.ufl.element(family, - mesh.ufl_cell().cellname(), - degree) + f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) + el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) def f(x): values = np.empty((3, x.shape[1]), dtype=f_dtype) - values[0] = np.full(x.shape[1], np.pi) + 2j*x[2] - values[1] = x[1] + 2 * x[0] + 2j*np.cos(x[2]) + values[0] = np.full(x.shape[1], np.pi) + values[1] = x[1] + 2 * x[0] values[2] = np.cos(x[2]) + if is_complex: + values[0] += 2j * x[2] + values[1] += 2j * np.cos(x[2]) return values + hash = write_function(mesh, el, f, dtype=f_dtype) MPI.COMM_WORLD.Barrier() read_function(read_comm, el, f, hash, dtype=f_dtype) -@pytest.mark.parametrize("complex", [True, False]) +@pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["RTCF"]) @pytest.mark.parametrize("degree", [1, 2, 3]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_2D_quad(read_comm, family, degree, complex, non_simplex_mesh_2D): +def test_read_write_2D_quad(read_comm, family, degree, is_complex, non_simplex_mesh_2D): mesh = non_simplex_mesh_2D - f_dtype = get_dtype(mesh.geometry.x.dtype, complex) - el = basix.ufl.element(family, - mesh.ufl_cell().cellname(), - degree) + f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) + el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) def f(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) - values[0] = 
np.full(x.shape[1], np.pi) + 2j*x[2] - values[1] = x[1] + 2 * x[0] + 2j*np.cos(x[2]) + values[0] = np.full(x.shape[1], np.pi) + values[1] = x[1] + 2 * x[0] + if is_complex: + values[0] += 2j * x[2] + values[1] += 2j * np.cos(x[2]) return values hash = write_function(mesh, el, f, f_dtype) @@ -115,22 +119,23 @@ def f(x): read_function(read_comm, el, f, hash, f_dtype) -@pytest.mark.parametrize("complex", [True, False]) +@pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["NCF"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_hex(read_comm, family, degree, complex, non_simplex_mesh_3D): +def test_read_write_hex(read_comm, family, degree, is_complex, non_simplex_mesh_3D): mesh = non_simplex_mesh_3D - f_dtype = get_dtype(mesh.geometry.x.dtype, complex) - el = basix.ufl.element(family, - mesh.ufl_cell().cellname(), - degree) + f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) + el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) def f(x): values = np.empty((3, x.shape[1]), dtype=f_dtype) values[0] = np.full(x.shape[1], np.pi) + x[0] values[1] = np.cos(x[2]) - values[2] = 1j*x[1] + x[0] + values[2] = x[0] + if is_complex: + values[0] += 2j * x[2] + values[2] -= 1j * x[1] return values hash = write_function(mesh, el, f, dtype=f_dtype) @@ -138,27 +143,31 @@ def f(x): read_function(read_comm, el, f, hash, dtype=f_dtype) -@pytest.mark.parametrize("complex", [True, False]) +@pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["RTCF"]) @pytest.mark.parametrize("degree", [1, 2, 3]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_multiple(read_comm, family, degree, complex, non_simplex_mesh_2D): +def test_read_write_multiple(read_comm, family, degree, is_complex, non_simplex_mesh_2D): mesh = non_simplex_mesh_2D - f_dtype = get_dtype(mesh.geometry.x.dtype, complex) - el = basix.ufl.element(family, - mesh.ufl_cell().cellname(), - degree) + f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) + el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) def f(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) - values[0] = np.full(x.shape[1], np.pi) + 2j*x[2] - values[1] = x[1] + 2 * x[0] + 2j*np.cos(x[2]) + values[0] = np.full(x.shape[1], np.pi) + values[1] = x[1] + 2 * x[0] + if is_complex: + values[0] -= 2j * x[2] + values[1] += 2j * np.cos(x[2]) return values def g(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) - values[0] = (1+1j) * x[0] - values[1] = (3-3j) * x[1] + values[0] = 2 * x[1] + values[1] = 3 * x[0] + if is_complex: + values[0] += 3j * x[0] + values[1] += 2j * x[0] * x[1] return values hash_f = write_function(mesh, el, f, dtype=f_dtype, name="f", append=False) diff --git a/tests/test_legacy_readers.py b/tests/test_legacy_readers.py index a73152f..7f7069c 100644 --- a/tests/test_legacy_readers.py +++ b/tests/test_legacy_readers.py @@ -15,8 +15,12 @@ import ufl from dolfinx.fem.petsc import LinearProblem -from adios4dolfinx import (read_function, read_function_from_legacy_h5, - read_mesh, read_mesh_from_legacy_h5) +from adios4dolfinx import ( + read_function, + read_function_from_legacy_h5, + read_mesh, + read_mesh_from_legacy_h5, +) def test_legacy_mesh(): @@ -80,18 +84,16 @@ def test_legacy_function(): L = ufl.inner(f, v) * ufl.dx uh = dolfinx.fem.Function(V) - problem = LinearProblem( - a, L, [], uh, petsc_options={"ksp_type": "preonly", "pc_type": 
"lu"} - ) + problem = LinearProblem(a, L, [], uh, petsc_options={"ksp_type": "preonly", "pc_type": "lu"}) problem.solve() u_in = dolfinx.fem.Function(V) read_function_from_legacy_h5(mesh.comm, path, u_in, group="v") np.testing.assert_allclose(uh.x.array, u_in.x.array, atol=1e-14) - W = dolfinx.fem.functionspace(mesh, ("DG", 2, (mesh.geometry.dim, ))) + W = dolfinx.fem.functionspace(mesh, ("DG", 2, (mesh.geometry.dim,))) wh = dolfinx.fem.Function(W) - wh.interpolate(lambda x: (x[0], 3*x[2], 7*x[1])) + wh.interpolate(lambda x: (x[0], 3 * x[2], 7 * x[1])) w_in = dolfinx.fem.Function(W) read_function_from_legacy_h5(mesh.comm, path, w_in, group="w") @@ -115,29 +117,27 @@ def test_read_legacy_function_from_checkpoint(): L = ufl.inner(f, v) * ufl.dx uh = dolfinx.fem.Function(V) - problem = LinearProblem( - a, L, [], uh, petsc_options={"ksp_type": "preonly", "pc_type": "lu"} - ) + problem = LinearProblem(a, L, [], uh, petsc_options={"ksp_type": "preonly", "pc_type": "lu"}) problem.solve() u_in = dolfinx.fem.Function(V) - read_function_from_legacy_h5(mesh.comm, path, u_in, group="v", step=0) + read_function_from_legacy_h5(mesh.comm, path, u_in, group="v", step=0) assert np.allclose(uh.x.array, u_in.x.array) # Check second step uh.interpolate(lambda x: x[0]) - read_function_from_legacy_h5(mesh.comm, path, u_in, group="v", step=1) + read_function_from_legacy_h5(mesh.comm, path, u_in, group="v", step=1) assert np.allclose(uh.x.array, u_in.x.array) - W = dolfinx.fem.functionspace(mesh, ("DG", 2, (mesh.geometry.dim, ))) + W = dolfinx.fem.functionspace(mesh, ("DG", 2, (mesh.geometry.dim,))) wh = dolfinx.fem.Function(W) - wh.interpolate(lambda x: (x[0], 3*x[2], 7*x[1])) + wh.interpolate(lambda x: (x[0], 3 * x[2], 7 * x[1])) w_in = dolfinx.fem.Function(W) read_function_from_legacy_h5(mesh.comm, path, w_in, group="w", step=0) np.testing.assert_allclose(wh.x.array, w_in.x.array, atol=1e-14) - wh.interpolate(lambda x: np.vstack((x[0], 0*x[0], x[1]))) + wh.interpolate(lambda x: np.vstack((x[0], 0 * x[0], x[1]))) read_function_from_legacy_h5(mesh.comm, path, w_in, group="w", step=1) np.testing.assert_allclose(wh.x.array, w_in.x.array, atol=1e-14) diff --git a/tests/test_mesh_writer.py b/tests/test_mesh_writer.py index 776d166..e234e13 100644 --- a/tests/test_mesh_writer.py +++ b/tests/test_mesh_writer.py @@ -34,9 +34,7 @@ def test_mesh_read_writer(encoder, suffix, ghost_mode): mesh.comm.Barrier() start = time.perf_counter() - mesh_adios = read_mesh( - MPI.COMM_WORLD, file.with_suffix(suffix), encoder, ghost_mode - ) + mesh_adios = read_mesh(MPI.COMM_WORLD, file.with_suffix(suffix), encoder, ghost_mode) end = time.perf_counter() print(f"Read ADIOS2 mesh: {end-start}") mesh.comm.Barrier() @@ -58,13 +56,9 @@ def test_mesh_read_writer(encoder, suffix, ghost_mode): # Check that integration over different entities are consistent for measure in [ufl.ds, ufl.dS, ufl.dx]: - c_adios = dolfinx.fem.assemble_scalar( - dolfinx.fem.form(1 * measure(domain=mesh_adios)) - ) + c_adios = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh_adios))) c_ref = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh))) - c_xdmf = dolfinx.fem.assemble_scalar( - dolfinx.fem.form(1 * measure(domain=mesh_xdmf)) - ) + c_xdmf = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh_xdmf))) assert np.isclose( mesh_adios.comm.allreduce(c_adios, MPI.SUM), mesh.comm.allreduce(c_xdmf, MPI.SUM), diff --git a/tests/test_meshtags.py b/tests/test_meshtags.py index 352142d..e1f4535 100644 --- 
a/tests/test_meshtags.py +++ b/tests/test_meshtags.py @@ -7,8 +7,8 @@ import dolfinx import numpy as np import numpy.typing as npt - import pytest + import adios4dolfinx root = 0 @@ -33,9 +33,7 @@ one_dim_combinations = itertools.product(dtypes, write_comm) two_dim_combinations = itertools.product(dtypes, two_dimensional_cell_types, write_comm) -three_dim_combinations = itertools.product( - dtypes, three_dimensional_cell_types, write_comm -) +three_dim_combinations = itertools.product(dtypes, three_dimensional_cell_types, write_comm) @pytest.fixture(params=one_dim_combinations, scope="module") @@ -115,7 +113,6 @@ def test_checkpointing_meshtags_1D(mesh_1D, read_comm, read_mode): # unique number (their initial global index). org_maps = [] for dim in range(mesh.topology.dim + 1): - mesh.topology.create_connectivity(dim, mesh.topology.dim) e_map = mesh.topology.index_map(dim) num_entities_local = e_map.size_local @@ -140,9 +137,7 @@ def test_checkpointing_meshtags_1D(mesh_1D, read_comm, read_mode): MPI.COMM_WORLD.Barrier() # Read mesh on testing communicator - new_mesh = adios4dolfinx.read_mesh( - read_comm, filename, engine="BP4", ghost_mode=read_mode - ) + new_mesh = adios4dolfinx.read_mesh(read_comm, filename, engine="BP4", ghost_mode=read_mode) for dim in range(new_mesh.topology.dim + 1): # Read meshtags on all processes if testing communicator has multiple ranks # else read on root 0 @@ -183,7 +178,6 @@ def test_checkpointing_meshtags_2D(mesh_2D, read_comm, read_mode): org_maps = [] for dim in range(mesh.topology.dim + 1): - mesh.topology.create_connectivity(dim, mesh.topology.dim) e_map = mesh.topology.index_map(dim) num_entities_local = e_map.size_local @@ -202,9 +196,7 @@ def test_checkpointing_meshtags_2D(mesh_2D, read_comm, read_mode): del ft del mesh MPI.COMM_WORLD.Barrier() - new_mesh = adios4dolfinx.read_mesh( - read_comm, filename, engine="BP4", ghost_mode=read_mode - ) + new_mesh = adios4dolfinx.read_mesh(read_comm, filename, engine="BP4", ghost_mode=read_mode) for dim in range(new_mesh.topology.dim + 1): if read_comm.size != 1: new_ft = adios4dolfinx.read_meshtags( @@ -240,7 +232,6 @@ def test_checkpointing_meshtags_3D(mesh_3D, read_comm, read_mode): org_maps = [] for dim in range(mesh.topology.dim + 1): - mesh.topology.create_connectivity(dim, mesh.topology.dim) e_map = mesh.topology.index_map(dim) num_entities_local = e_map.size_local @@ -261,9 +252,7 @@ def test_checkpointing_meshtags_3D(mesh_3D, read_comm, read_mode): del mesh MPI.COMM_WORLD.Barrier() - new_mesh = adios4dolfinx.read_mesh( - read_comm, filename, engine="BP4", ghost_mode=read_mode - ) + new_mesh = adios4dolfinx.read_mesh(read_comm, filename, engine="BP4", ghost_mode=read_mode) for dim in range(new_mesh.topology.dim + 1): if read_comm.size != 1: new_ft = adios4dolfinx.read_meshtags( diff --git a/tests/test_numpy_vectorization.py b/tests/test_numpy_vectorization.py index 88dcfd2..2f01bb3 100644 --- a/tests/test_numpy_vectorization.py +++ b/tests/test_numpy_vectorization.py @@ -13,7 +13,10 @@ write_comm = [MPI.COMM_SELF, MPI.COMM_WORLD] # Communicators for creating mesh ghost_mode = [dolfinx.mesh.GhostMode.none, dolfinx.mesh.GhostMode.shared_facet] -two_dimensional_cell_types = [dolfinx.mesh.CellType.triangle, dolfinx.mesh.CellType.quadrilateral] +two_dimensional_cell_types = [ + dolfinx.mesh.CellType.triangle, + dolfinx.mesh.CellType.quadrilateral, +] three_dimensional_cell_types = [dolfinx.mesh.CellType.hexahedron] two_dim_combinations = itertools.product(two_dimensional_cell_types, write_comm, ghost_mode) @@ 
-23,7 +26,9 @@ @pytest.fixture(params=two_dim_combinations, scope="module") def mesh_2D(request): cell_type, write_comm, ghost_mode = request.param - mesh = dolfinx.mesh.create_unit_square(write_comm, 10, 10, cell_type=cell_type, ghost_mode=ghost_mode) + mesh = dolfinx.mesh.create_unit_square( + write_comm, 10, 10, cell_type=cell_type, ghost_mode=ghost_mode + ) return mesh @@ -31,7 +36,9 @@ def mesh_2D(request): def mesh_3D(request): cell_type, write_comm, ghost_mode = request.param M = 5 - mesh = dolfinx.mesh.create_unit_cube(write_comm, M, M, M, cell_type=cell_type, ghost_mode=ghost_mode) + mesh = dolfinx.mesh.create_unit_cube( + write_comm, M, M, M, cell_type=cell_type, ghost_mode=ghost_mode + ) return mesh @@ -61,18 +68,18 @@ def compute_positions( @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) def test_unroll_P(family, degree, mesh_2D): - V = dolfinx.fem.functionspace(mesh_2D, (family, degree)) dofmap = V.dofmap unrolled_map = unroll_dofmap(dofmap.list, dofmap.bs) - normal_unroll = np.zeros((dofmap.list.shape[0], dofmap.list.shape[1] * dofmap.bs), - dtype=np.int32) + normal_unroll = np.zeros( + (dofmap.list.shape[0], dofmap.list.shape[1] * dofmap.bs), dtype=np.int32 + ) for i, dofs in enumerate(dofmap.list): for j, dof in enumerate(dofs): for k in range(dofmap.bs): - normal_unroll[i, j * dofmap.bs+k] = dof * dofmap.bs + k + normal_unroll[i, j * dofmap.bs + k] = dof * dofmap.bs + k np.testing.assert_allclose(unrolled_map, normal_unroll) @@ -80,21 +87,20 @@ def test_unroll_P(family, degree, mesh_2D): @pytest.mark.parametrize("family", ["RTCF"]) @pytest.mark.parametrize("degree", [1, 2, 3]) def test_unroll_RTCF(family, degree, mesh_3D): - el = basix.ufl.element(family, - mesh_3D.ufl_cell().cellname(), - degree) + el = basix.ufl.element(family, mesh_3D.ufl_cell().cellname(), degree) V = dolfinx.fem.functionspace(mesh_3D, el) dofmap = V.dofmap unrolled_map = unroll_dofmap(dofmap.list, dofmap.bs) - normal_unroll = np.zeros((dofmap.list.shape[0], dofmap.list.shape[1] * dofmap.bs), - dtype=np.int32) + normal_unroll = np.zeros( + (dofmap.list.shape[0], dofmap.list.shape[1] * dofmap.bs), dtype=np.int32 + ) for i, dofs in enumerate(dofmap.list): for j, dof in enumerate(dofs): for k in range(dofmap.bs): - normal_unroll[i, j * dofmap.bs+k] = dof * dofmap.bs + k + normal_unroll[i, j * dofmap.bs + k] = dof * dofmap.bs + k np.testing.assert_allclose(unrolled_map, normal_unroll) @@ -102,16 +108,16 @@ def test_unroll_RTCF(family, degree, mesh_3D): @pytest.mark.parametrize("family", ["RTCF"]) @pytest.mark.parametrize("degree", [1, 2, 3]) def test_compute_dofmap_pos_RTCF(family, degree, mesh_3D): - el = basix.ufl.element(family, - mesh_3D.ufl_cell().cellname(), - degree) + el = basix.ufl.element(family, mesh_3D.ufl_cell().cellname(), degree) V = dolfinx.fem.functionspace(mesh_3D, el) local_cells, local_pos = compute_dofmap_pos(V) num_cells_local = mesh_3D.topology.index_map(mesh_3D.topology.dim).size_local num_dofs_local = V.dofmap.index_map.size_local * V.dofmap.index_map_bs - reference_cells, reference_pos = compute_positions(V.dofmap.list, V.dofmap.bs, num_dofs_local, num_cells_local) + reference_cells, reference_pos = compute_positions( + V.dofmap.list, V.dofmap.bs, num_dofs_local, num_cells_local + ) np.testing.assert_allclose(reference_cells, local_cells) np.testing.assert_allclose(reference_pos, local_pos) @@ -119,16 +125,16 @@ def test_compute_dofmap_pos_RTCF(family, degree, mesh_3D): @pytest.mark.parametrize("family", ["Lagrange", "DG"]) 
@pytest.mark.parametrize("degree", [1, 4]) def test_compute_dofmap_pos_P(family, degree, mesh_2D): - el = basix.ufl.element(family, - mesh_2D.ufl_cell().cellname(), - degree) + el = basix.ufl.element(family, mesh_2D.ufl_cell().cellname(), degree) V = dolfinx.fem.functionspace(mesh_2D, el) local_cells, local_pos = compute_dofmap_pos(V) num_cells_local = mesh_2D.topology.index_map(mesh_2D.topology.dim).size_local num_dofs_local = V.dofmap.index_map.size_local * V.dofmap.index_map_bs - reference_cells, reference_pos = compute_positions(V.dofmap.list, V.dofmap.bs, num_dofs_local, num_cells_local) + reference_cells, reference_pos = compute_positions( + V.dofmap.list, V.dofmap.bs, num_dofs_local, num_cells_local + ) np.testing.assert_allclose(reference_cells, local_cells) np.testing.assert_allclose(reference_pos, local_pos) @@ -153,6 +159,6 @@ def test_compute_send_sizes(): out_size[j] += 1 break - process_pos_indicator = (data_owners.reshape(-1, 1) == dest_ranks) + process_pos_indicator = data_owners.reshape(-1, 1) == dest_ranks vectorized_out_size = np.count_nonzero(process_pos_indicator, axis=0) np.testing.assert_allclose(vectorized_out_size, out_size) diff --git a/tests/test_original_checkpoint.py b/tests/test_original_checkpoint.py new file mode 100644 index 0000000..1fc86c6 --- /dev/null +++ b/tests/test_original_checkpoint.py @@ -0,0 +1,540 @@ +import itertools +from pathlib import Path +from typing import Callable + +from mpi4py import MPI + +import basix +import basix.ufl +import dolfinx +import ipyparallel as ipp +import numpy as np +import pytest + +import adios4dolfinx + +from .test_utils import get_dtype + +dtypes = [np.float64, np.float32] # Mesh geometry dtypes + +two_dimensional_cell_types = [ + dolfinx.mesh.CellType.triangle, + dolfinx.mesh.CellType.quadrilateral, +] +three_dimensional_cell_types = [ + dolfinx.mesh.CellType.tetrahedron, + dolfinx.mesh.CellType.hexahedron, +] + +two_dim_combinations = itertools.product(dtypes, two_dimensional_cell_types) +three_dim_combinations = itertools.product(dtypes, three_dimensional_cell_types) + + +@pytest.fixture(scope="module") +def create_simplex_mesh_2D(): + mesh = dolfinx.mesh.create_unit_square( + MPI.COMM_WORLD, + 10, + 10, + cell_type=dolfinx.mesh.CellType.triangle, + dtype=np.float64, + ) + fname = Path("output/original_mesh_2D_simplex.xdmf") + with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf: + xdmf.write_mesh(mesh) + return fname + + +@pytest.fixture(scope="module") +def create_simplex_mesh_3D(): + mesh = dolfinx.mesh.create_unit_cube( + MPI.COMM_WORLD, + 5, + 5, + 5, + cell_type=dolfinx.mesh.CellType.tetrahedron, + dtype=np.float64, + ) + fname = Path("output/original_mesh_3D_simplex.xdmf") + with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf: + xdmf.write_mesh(mesh) + return fname + + +@pytest.fixture(scope="module") +def create_non_simplex_mesh_2D(): + mesh = dolfinx.mesh.create_unit_square( + MPI.COMM_WORLD, + 10, + 10, + cell_type=dolfinx.mesh.CellType.quadrilateral, + dtype=np.float64, + ) + fname = Path("output/original_mesh_2D_non_simplex.xdmf") + with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf: + xdmf.write_mesh(mesh) + return fname + + +@pytest.fixture(scope="module") +def create_non_simplex_mesh_3D(): + mesh = dolfinx.mesh.create_unit_cube( + MPI.COMM_WORLD, + 5, + 5, + 5, + cell_type=dolfinx.mesh.CellType.hexahedron, + dtype=np.float64, + ) + fname = Path("output/original_mesh_3D_non_simplex.xdmf") + with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf: + 
xdmf.write_mesh(mesh)
+    return fname
+
+
+@pytest.fixture(params=two_dim_combinations, scope="module")
+def create_2D_mesh(request):
+    dtype, cell_type = request.param
+    mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 5, 7, cell_type=cell_type, dtype=dtype)
+    fname = Path(f"output/original_mesh_2D_{dtype}_{cell_type}.xdmf")
+    with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf:
+        xdmf.write_mesh(mesh)
+    return fname
+
+
+@pytest.fixture(params=three_dim_combinations, scope="module")
+def create_3D_mesh(request):
+    dtype, cell_type = request.param
+    mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, 5, 7, 3, cell_type=cell_type, dtype=dtype)
+    fname = Path(f"output/original_mesh_3D_{dtype}_{cell_type}.xdmf")
+    with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf:
+        xdmf.write_mesh(mesh)
+    return fname
+
+
+@pytest.fixture(scope="module")
+def cluster():
+    cluster = ipp.Cluster(engines="mpi", n=2)
+    rc = cluster.start_and_connect_sync()
+    yield rc
+    cluster.stop_cluster_sync()
+
+
+def write_function(
+    write_mesh: bool,
+    mesh: dolfinx.mesh.Mesh,
+    el: basix.ufl._ElementBase,
+    f: Callable[[np.ndarray], np.ndarray],
+    dtype: np.dtype,
+    name: str,
+) -> Path:
+    """Convenience function for writing a function to file on the original input mesh"""
+    V = dolfinx.fem.functionspace(mesh, el)
+    uh = dolfinx.fem.Function(V, dtype=dtype)
+    uh.interpolate(f)
+    uh.name = name
+    el_hash = (
+        V.element.signature()
+        .replace(" ", "")
+        .replace(",", "")
+        .replace("(", "")
+        .replace(")", "")
+        .replace("[", "")
+        .replace("]", "")
+    )
+
+    file_hash = f"{el_hash}_{np.dtype(dtype).name}"
+    filename = Path(f"output/mesh_{file_hash}.bp")
+
+    if write_mesh:
+        adios4dolfinx.write_mesh_input_order(mesh, filename)
+    adios4dolfinx.write_function_on_input_mesh(uh, filename, time=0.0)
+    return filename
+
+
+def read_function(
+    mesh_fname: Path,
+    u_fname: Path,
+    u_name: str,
+    family: str,
+    degree: int,
+    f: Callable[[np.ndarray], np.ndarray],
+    u_dtype: np.dtype,
+):
+    """
+    Convenience function for reading a mesh with IPython Parallel and comparing
+    the checkpointed function to the exact solution
+    """
+    from mpi4py import MPI
+
+    import basix.ufl
+    import dolfinx
+
+    import adios4dolfinx
+
+    assert MPI.COMM_WORLD.size > 1
+    if mesh_fname.suffix == ".xdmf":
+        with dolfinx.io.XDMFFile(MPI.COMM_WORLD, mesh_fname, "r") as xdmf:
+            mesh = xdmf.read_mesh()
+    elif mesh_fname.suffix == ".bp":
+        mesh = adios4dolfinx.read_mesh(
+            MPI.COMM_WORLD, mesh_fname, "BP4", dolfinx.mesh.GhostMode.shared_facet
+        )
+    el = basix.ufl.element(
+        family,
+        mesh.ufl_cell().cellname(),
+        degree,
+        basix.LagrangeVariant.gll_warped,
+        shape=(mesh.geometry.dim,),
+        dtype=mesh.geometry.x.dtype,
+    )
+
+    V = dolfinx.fem.functionspace(mesh, el)
+    u = dolfinx.fem.Function(V, name=u_name, dtype=u_dtype)
+    adios4dolfinx.read_function(u, u_fname, time=0.0)
+    MPI.COMM_WORLD.Barrier()
+
+    u_ex = dolfinx.fem.Function(V, name="exact", dtype=u_dtype)
+    u_ex.interpolate(f)
+    u_ex.x.scatter_forward()
+    atol = 10 * np.finfo(u_dtype).resolution
+    np.testing.assert_allclose(u.x.array, u_ex.x.array, atol=atol)  # type: ignore
+
+
+def write_function_vector(
+    write_mesh: bool,
+    fname: Path,
+    family: str,
+    degree: int,
+    f: Callable[[np.ndarray], np.ndarray],
+    dtype: np.dtype,
+    name: str,
+) -> Path:
+    """Convenience function for writing a function to file on the original input mesh"""
+    from mpi4py import MPI
+
+    import basix.ufl
+    import dolfinx
+
+    import adios4dolfinx
+
+    assert MPI.COMM_WORLD.size > 1
+    with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "r") as xdmf:
+        mesh = 
xdmf.read_mesh() + el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree, dtype=mesh.geometry.x.dtype) + V = dolfinx.fem.functionspace(mesh, el) + uh = dolfinx.fem.Function(V, dtype=dtype) + uh.interpolate(f) + uh.name = name + el_hash = ( + V.element.signature() + .replace(" ", "") + .replace(",", "") + .replace("(", "") + .replace(")", "") + .replace("[", "") + .replace("]", "") + ) + + file_hash = f"{el_hash}_{np.dtype(dtype).name}" + filename = Path(f"output/mesh_{file_hash}.bp") + + if write_mesh: + adios4dolfinx.write_mesh_input_order(mesh, filename) + adios4dolfinx.write_function_on_input_mesh(uh, filename, time=0.0) + return filename + + +def read_function_vector( + mesh_fname: Path, + u_fname: Path, + u_name: str, + family: str, + degree: int, + f: Callable[[np.ndarray], np.ndarray], + u_dtype: np.dtype, +): + """ + Convenience function for reading mesh with IPython-parallel and compare to exact solution + """ + + if mesh_fname.suffix == ".xdmf": + with dolfinx.io.XDMFFile(MPI.COMM_WORLD, mesh_fname, "r") as xdmf: + mesh = xdmf.read_mesh() + elif mesh_fname.suffix == ".bp": + mesh = adios4dolfinx.read_mesh( + MPI.COMM_WORLD, mesh_fname, "BP4", dolfinx.mesh.GhostMode.shared_facet + ) + el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) + + V = dolfinx.fem.functionspace(mesh, el) + u = dolfinx.fem.Function(V, name=u_name, dtype=u_dtype) + adios4dolfinx.read_function(u, u_fname, time=0.0) + MPI.COMM_WORLD.Barrier() + + u_ex = dolfinx.fem.Function(V, name="exact", dtype=u_dtype) + u_ex.interpolate(f) + u_ex.x.scatter_forward() + atol = 10 * np.finfo(u_dtype).resolution + np.testing.assert_allclose(u.x.array, u_ex.x.array, atol=atol) # type: ignore + + +@pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="Test uses ipythonparallel for MPI") +@pytest.mark.parametrize("is_complex", [True, False]) +@pytest.mark.parametrize("family", ["Lagrange", "DG"]) +@pytest.mark.parametrize("degree", [1, 4]) +@pytest.mark.parametrize("write_mesh", [True, False]) +def test_read_write_P_2D(write_mesh, family, degree, is_complex, create_2D_mesh, cluster): + fname = create_2D_mesh + with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "r") as xdmf: + mesh = xdmf.read_mesh() + f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) + + el = basix.ufl.element( + family, + mesh.ufl_cell().cellname(), + degree, + basix.LagrangeVariant.gll_warped, + shape=(mesh.geometry.dim,), + dtype=mesh.geometry.x.dtype, + ) + + def f(x): + values = np.empty((2, x.shape[1]), dtype=f_dtype) + values[0] = np.full(x.shape[1], np.pi) + x[0] + values[1] = x[0] + if is_complex: + values[0] -= 3j * x[1] + values[1] += 2j * x[0] + return values + + hash = write_function(write_mesh, mesh, el, f, f_dtype, "u_original") + + if write_mesh: + mesh_fname = fname + else: + mesh_fname = hash + query = cluster[:].apply_async( + read_function, mesh_fname, hash, "u_original", family, degree, f, f_dtype + ) + query.wait() + assert query.successful(), query.error + + +@pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="Test uses ipythonparallel for MPI") +@pytest.mark.parametrize("is_complex", [True, False]) +@pytest.mark.parametrize("family", ["Lagrange", "DG"]) +@pytest.mark.parametrize("degree", [1, 4]) +@pytest.mark.parametrize("write_mesh", [True, False]) +def test_read_write_P_3D(write_mesh, family, degree, is_complex, create_3D_mesh, cluster): + fname = create_3D_mesh + with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "r") as xdmf: + mesh = xdmf.read_mesh() + f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) + 
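+    # This test body runs in a serial pytest process (see the skipif above):
+    # the checkpoint is written here, and read_function is then executed on
+    # the two-engine ipyparallel cluster to exercise the parallel read path.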
el = basix.ufl.element( + family, + mesh.ufl_cell().cellname(), + degree, + basix.LagrangeVariant.gll_warped, + shape=(mesh.geometry.dim,), + ) + + def f(x): + values = np.empty((3, x.shape[1]), dtype=f_dtype) + values[0] = np.pi + x[0] + values[1] = x[1] + 2 * x[0] + values[2] = np.cos(x[2]) + if is_complex: + values[0] -= np.pi * x[1] + values[1] += 3j * x[2] + values[2] += 2j + return values + + hash = write_function(write_mesh, mesh, el, f, f_dtype, "u_original") + MPI.COMM_WORLD.Barrier() + + if write_mesh: + mesh_fname = fname + else: + mesh_fname = hash + + query = cluster[:].apply_async( + read_function, mesh_fname, hash, "u_original", family, degree, f, f_dtype + ) + query.wait() + assert query.successful(), query.error + + +@pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="Test uses ipythonparallel for MPI") +@pytest.mark.parametrize("write_mesh", [True, False]) +@pytest.mark.parametrize("is_complex", [True, False]) +@pytest.mark.parametrize("family", ["N1curl", "RT"]) +@pytest.mark.parametrize("degree", [1, 4]) +def test_read_write_2D_vector_simplex( + write_mesh, family, degree, is_complex, create_simplex_mesh_2D, cluster +): + fname = create_simplex_mesh_2D + + f_dtype = get_dtype(np.float64, is_complex) + + def f(x): + values = np.empty((2, x.shape[1]), dtype=f_dtype) + values[0] = np.full(x.shape[1], np.pi) + x[0] + values[1] = x[1] + if is_complex: + values[0] -= np.sin(x[1]) * 2j + values[1] += 3j + return values + + query = cluster[:].apply_async( + write_function_vector, + write_mesh, + fname, + family, + degree, + f, + f_dtype, + "u_original", + ) + query.wait() + assert query.successful(), query.error + paths = query.result() + file_path = paths[0] + assert all([file_path == path for path in paths]) + if write_mesh: + mesh_fname = file_path + else: + mesh_fname = fname + + read_function_vector(mesh_fname, file_path, "u_original", family, degree, f, f_dtype) + + +@pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="Test uses ipythonparallel for MPI") +@pytest.mark.parametrize("write_mesh", [True, False]) +@pytest.mark.parametrize("is_complex", [True, False]) +@pytest.mark.parametrize("family", ["N1curl", "RT"]) +@pytest.mark.parametrize("degree", [1, 4]) +def test_read_write_3D_vector_simplex( + write_mesh, family, degree, is_complex, create_simplex_mesh_3D, cluster +): + fname = create_simplex_mesh_3D + + f_dtype = get_dtype(np.float64, is_complex) + + def f(x): + values = np.empty((3, x.shape[1]), dtype=f_dtype) + values[0] = np.full(x.shape[1], np.pi) + values[1] = x[1] + 2 * x[0] + values[2] = np.cos(x[2]) + if is_complex: + values[0] += 2j * x[2] + values[1] += 2j * np.cos(x[2]) + return values + + query = cluster[:].apply_async( + write_function_vector, + write_mesh, + fname, + family, + degree, + f, + f_dtype, + "u_original", + ) + query.wait() + assert query.successful(), query.error + paths = query.result() + file_path = paths[0] + assert all([file_path == path for path in paths]) + if write_mesh: + mesh_fname = file_path + else: + mesh_fname = fname + + read_function_vector(mesh_fname, file_path, "u_original", family, degree, f, f_dtype) + + +@pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="Test uses ipythonparallel for MPI") +@pytest.mark.parametrize("write_mesh", [True, False]) +@pytest.mark.parametrize("is_complex", [True, False]) +@pytest.mark.parametrize("family", ["RTCF"]) +@pytest.mark.parametrize("degree", [1, 2, 3]) +def test_read_write_2D_vector_non_simplex( + write_mesh, family, degree, is_complex, create_non_simplex_mesh_2D, cluster +): + 
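+    # For the vector-valued elements the roles are swapped: the checkpoint is
+    # written on the two ipyparallel MPI engines (write_function_vector asserts
+    # a parallel communicator), and this serial process reads it back and
+    # compares against the interpolated exact solution.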
fname = create_non_simplex_mesh_2D + + f_dtype = get_dtype(np.float64, is_complex) + + def f(x): + values = np.empty((2, x.shape[1]), dtype=f_dtype) + values[0] = np.full(x.shape[1], np.pi) + values[1] = x[1] + 2 * x[0] + if is_complex: + values[0] += 2j * x[1] + values[1] -= np.sin(x[0]) * 9j + return values + + query = cluster[:].apply_async( + write_function_vector, + write_mesh, + fname, + family, + degree, + f, + f_dtype, + "u_original", + ) + query.wait() + assert query.successful(), query.error + paths = query.result() + file_path = paths[0] + assert all([file_path == path for path in paths]) + if write_mesh: + mesh_fname = file_path + else: + mesh_fname = fname + + read_function_vector(mesh_fname, file_path, "u_original", family, degree, f, f_dtype) + + +@pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="Test uses ipythonparallel for MPI") +@pytest.mark.parametrize("write_mesh", [True, False]) +@pytest.mark.parametrize("is_complex", [True, False]) +@pytest.mark.parametrize("family", ["NCF"]) +@pytest.mark.parametrize("degree", [1, 4]) +def test_read_write_3D_vector_non_simplex( + write_mesh, family, degree, is_complex, create_non_simplex_mesh_3D, cluster +): + fname = create_non_simplex_mesh_3D + + f_dtype = get_dtype(np.float64, is_complex) + + def f(x): + values = np.empty((3, x.shape[1]), dtype=f_dtype) + values[0] = np.full(x.shape[1], np.pi) + x[0] + values[1] = np.cos(x[2]) + values[2] = x[0] + if is_complex: + values[2] += x[0] * x[1] * 3j + return values + + query = cluster[:].apply_async( + write_function_vector, + write_mesh, + fname, + family, + degree, + f, + f_dtype, + "u_original", + ) + query.wait() + assert query.successful(), query.error + paths = query.result() + file_path = paths[0] + assert all([file_path == path for path in paths]) + if write_mesh: + mesh_fname = file_path + else: + mesh_fname = fname + + read_function_vector(mesh_fname, file_path, "u_original", family, degree, f, f_dtype) diff --git a/tests/test_snapshot_checkpoint.py b/tests/test_snapshot_checkpoint.py index 1546523..2ce1aea 100644 --- a/tests/test_snapshot_checkpoint.py +++ b/tests/test_snapshot_checkpoint.py @@ -2,15 +2,15 @@ from mpi4py import MPI +import adios2 import basix.ufl import dolfinx import numpy as np import pytest -from adios4dolfinx.adios2_helpers import resolve_adios_scope from adios4dolfinx import snapshot_checkpoint +from adios4dolfinx.adios2_helpers import resolve_adios_scope -import adios2 adios2 = resolve_adios_scope(adios2) @@ -43,9 +43,7 @@ def f(x): assert np.allclose(u.x.array, v.x.array) -@pytest.mark.parametrize( - "cell_type, family", [(tetra, "N1curl"), (tetra, "RT"), (hex, "NCF")] -) +@pytest.mark.parametrize("cell_type, family", [(tetra, "N1curl"), (tetra, "RT"), (hex, "NCF")]) @pytest.mark.parametrize("degree", [1, 4]) def test_read_write_3D(family, degree, cell_type): mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, 3, 3, 3, cell_type=cell_type) @@ -73,9 +71,7 @@ def f(x): @pytest.mark.parametrize("degree", [1, 4]) def test_read_write_P_2D(family, degree, cell_type): mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 5, 5, cell_type=cell_type) - el = basix.ufl.element( - family, mesh.ufl_cell().cellname(), degree, shape=(mesh.geometry.dim,) - ) + el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree, shape=(mesh.geometry.dim,)) def f(x): return (np.full(x.shape[1], np.pi) + x[0], x[1]) @@ -99,9 +95,7 @@ def f(x): @pytest.mark.parametrize("degree", [1, 4]) def test_read_write_P_3D(family, degree, cell_type): mesh = 
dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, 5, 5, 5, cell_type=cell_type) - el = basix.ufl.element( - family, mesh.ufl_cell().cellname(), degree, shape=(mesh.geometry.dim,) - ) + el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree, shape=(mesh.geometry.dim,)) def f(x): return (np.full(x.shape[1], np.pi) + x[0], x[1] + 2 * x[0], np.cos(x[2])) diff --git a/tests/test_utils.py b/tests/test_utils.py index ee4210a..601a98c 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -42,9 +42,7 @@ def write_function(mesh, el, f, dtype, name="uh", append: bool = False) -> str: def read_function(comm, el, f, hash, dtype, name="uh"): filename = f"output/mesh_{hash}.bp" engine = "BP4" - mesh = adios4dolfinx.read_mesh( - comm, filename, engine, dolfinx.mesh.GhostMode.shared_facet - ) + mesh = adios4dolfinx.read_mesh(comm, filename, engine, dolfinx.mesh.GhostMode.shared_facet) V = dolfinx.fem.functionspace(mesh, el) v = dolfinx.fem.Function(V, dtype=dtype) v.name = name @@ -53,18 +51,18 @@ def read_function(comm, el, f, hash, dtype, name="uh"): v_ex.interpolate(f) res = np.finfo(dtype).resolution - assert np.allclose(v.x.array, v_ex.x.array, atol=10 * res, rtol=10 * res) + np.testing.assert_allclose(v.x.array, v_ex.x.array, atol=10 * res, rtol=10 * res) -def get_dtype(in_dtype: np.dtype, complex: bool): +def get_dtype(in_dtype: np.dtype, is_complex: bool): dtype: numpy.typing.DTypeLike if in_dtype == np.float32: - if complex: + if is_complex: dtype = np.complex64 else: dtype = np.float32 elif in_dtype == np.float64: - if complex: + if is_complex: dtype = np.complex128 else: dtype = np.float64 @@ -107,9 +105,7 @@ def write_function_time_dep(mesh, el, f0, f1, t0, t1, dtype) -> str: def read_function_time_dep(comm, el, f0, f1, t0, t1, hash, dtype): filename = f"output/mesh_{hash}.bp" engine = "BP4" - mesh = adios4dolfinx.read_mesh( - comm, filename, engine, dolfinx.mesh.GhostMode.shared_facet - ) + mesh = adios4dolfinx.read_mesh(comm, filename, engine, dolfinx.mesh.GhostMode.shared_facet) V = dolfinx.fem.functionspace(mesh, el) v = dolfinx.fem.Function(V, dtype=dtype) From d7ba0b20e78cbbe4d1945925559bf632c43b219d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Sat, 2 Mar 2024 19:55:37 +0100 Subject: [PATCH 13/49] Make API consistent (filename first) in read_ write_ operations. Vectorize dofmap reading from legacy (#72) * Make API consistent (filename first). Resolves #63 * Fix legacy data which uses old API * Revert change * Ruff fix * Future annotations * Move future to top * Fix last set of typing dependencies * FIx legacy tests * API fix for legacy * Vectorize mapping for reading in checkpoint. Assume dofmap with constant spacing as we don't support anything else atm. 
* Try fixing tests * Move ipp cluster into conftest.py * Remove debug * Apply suggestions from code review --- src/adios4dolfinx/adios2_helpers.py | 11 +- src/adios4dolfinx/checkpointing.py | 31 ++--- src/adios4dolfinx/comm_helpers.py | 4 +- src/adios4dolfinx/legacy_readers.py | 30 +++-- src/adios4dolfinx/original_checkpoint.py | 10 +- src/adios4dolfinx/structures.py | 12 +- src/adios4dolfinx/utils.py | 20 +-- src/adios4dolfinx/writers.py | 4 +- tests/conftest.py | 154 +++++++++++++++++++++++ tests/test_checkpointing.py | 38 ++++-- tests/test_checkpointing_vector.py | 43 +++++-- tests/test_legacy_readers.py | 24 ++-- tests/test_mesh_writer.py | 4 +- tests/test_meshtags.py | 33 ++--- tests/test_original_checkpoint.py | 60 +++++---- tests/test_utils.py | 124 ------------------ 16 files changed, 337 insertions(+), 265 deletions(-) create mode 100644 tests/conftest.py delete mode 100644 tests/test_utils.py diff --git a/src/adios4dolfinx/adios2_helpers.py b/src/adios4dolfinx/adios2_helpers.py index ead0ed1..fdac650 100644 --- a/src/adios4dolfinx/adios2_helpers.py +++ b/src/adios4dolfinx/adios2_helpers.py @@ -1,5 +1,6 @@ +from __future__ import annotations + from pathlib import Path -from typing import Tuple, Union from mpi4py import MPI @@ -36,7 +37,7 @@ def resolve_adios_scope(adios2): def read_cell_perms( adios: adios2.ADIOS, comm: MPI.Intracomm, - filename: Union[Path, str], + filename: Path | str, variable: str, num_cells_global: np.int64, engine: str, @@ -98,7 +99,7 @@ def read_cell_perms( def read_dofmap( adios: adios2.ADIOS, comm: MPI.Intracomm, - filename: Union[Path, str], + filename: Path | str, dofmap: str, dofmap_offsets: str, num_cells_global: np.int64, @@ -174,14 +175,14 @@ def read_dofmap( def read_array( adios: adios2.ADIOS, - filename: Union[Path, str], + filename: Path | str, array_name: str, engine: str, comm: MPI.Intracomm, time: float = 0.0, time_name: str = "", legacy: bool = False, -) -> Tuple[npt.NDArray[valid_function_types], int]: +) -> tuple[npt.NDArray[valid_function_types], int]: """ Read an array from file, return the global starting position of the local array diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py index 723dad1..db16c0e 100644 --- a/src/adios4dolfinx/checkpointing.py +++ b/src/adios4dolfinx/checkpointing.py @@ -4,8 +4,9 @@ # # SPDX-License-Identifier: MIT +from __future__ import annotations + from pathlib import Path -from typing import Optional, Union from mpi4py import MPI @@ -45,10 +46,10 @@ def write_meshtags( - filename: Union[Path, str], + filename: Path | str, mesh: dolfinx.mesh.Mesh, meshtags: dolfinx.mesh.MeshTags, - engine: Optional[str] = "BP4", + engine: str = "BP4", ): """ Write meshtags associated with input mesh to file. @@ -115,7 +116,7 @@ def write_meshtags( def read_meshtags( - filename: Union[Path, str], + filename: Path | str, mesh: dolfinx.mesh.Mesh, meshtag_name: str, engine: str = "BP4", @@ -206,8 +207,8 @@ def read_meshtags( def read_function( + filename: Path | str, u: dolfinx.fem.Function, - filename: Union[Path, str], engine: str = "BP4", time: float = 0.0, legacy: bool = False, @@ -216,8 +217,8 @@ def read_function( Read checkpoint from file and fill it into `u`. 
     Args:
-        u: Function to fill
         filename: Path to checkpoint
+        u: Function to fill
         engine: ADIOS engine type used for reading
         legacy: If checkpoint is from prior to time-dependent writing set to True
     """
@@ -329,17 +330,17 @@ def read_function(
 
 
 def read_mesh(
+    filename: Path | str,
     comm: MPI.Intracomm,
-    filename: Union[Path, str],
-    engine: str,
-    ghost_mode: dolfinx.mesh.GhostMode,
+    engine: str = "BP4",
+    ghost_mode: dolfinx.mesh.GhostMode = dolfinx.mesh.GhostMode.shared_facet,
 ) -> dolfinx.mesh.Mesh:
     """
     Read an ADIOS2 mesh into DOLFINx.
 
     Args:
-        comm: The MPI communciator to distribute the mesh over
         filename: Path to input file
+        comm: The MPI communicator to distribute the mesh over
         engine: ADIOS engine to use for reading (BP4, BP5 or HDF5)
         ghost_mode: Ghost mode to use for mesh
     Returns:
@@ -407,15 +408,15 @@ def read_mesh(
     return dolfinx.mesh.create_mesh(comm, mesh_topology, mesh_geometry, domain, partitioner)
 
 
-def write_mesh(mesh: dolfinx.mesh.Mesh, filename: Path, engine: str = "BP4"):
+def write_mesh(filename: Path, mesh: dolfinx.mesh.Mesh, engine: str = "BP4"):
     """
     Write a mesh to specified ADIOS2 format, see:
     https://adios2.readthedocs.io/en/stable/engines/engines.html
     for possible formats.
 
     Args:
-        mesh: The mesh to write to file
         filename: Path to save mesh (without file-extension)
+        mesh: The mesh to write to file
         engine: Adios2 Engine
     """
     num_xdofs_local = mesh.geometry.index_map().size_local
@@ -452,9 +453,9 @@ def write_mesh(mesh: dolfinx.mesh.Mesh, filename: Path, engine: str = "BP4"):
 
     # NOTE: Mode will become input again once we have variable geometry
     _internal_mesh_writer(
+        filename,
         mesh.comm,
         mesh_data,
-        filename,
         engine,
         mode=adios2.Mode.Write,
         io_name="MeshWriter",
     )
 
 
 def write_function(
+    filename: Path | str,
     u: dolfinx.fem.Function,
-    filename: Union[Path, str],
     engine: str = "BP4",
     mode: adios2.Mode = adios2.Mode.Append,
     time: float = 0.0,
@@ -530,4 +531,4 @@ def write_function(
     )
     # Write to file
     fname = Path(filename)
-    _internal_function_writer(comm, function_data, fname, engine, mode, time, "FunctionWriter")
+    _internal_function_writer(fname, comm, function_data, engine, mode, time, "FunctionWriter")
diff --git a/src/adios4dolfinx/comm_helpers.py b/src/adios4dolfinx/comm_helpers.py
index a0fd70b..7268f30 100644
--- a/src/adios4dolfinx/comm_helpers.py
+++ b/src/adios4dolfinx/comm_helpers.py
@@ -1,4 +1,4 @@
-from typing import Tuple
+from __future__ import annotations
 
 from mpi4py import MPI
 
@@ -130,7 +130,7 @@ def send_and_recv_cell_perm(
     perms: npt.NDArray[np.uint32],
     cell_owners: npt.NDArray[np.int32],
     comm: MPI.Intracomm,
-) -> Tuple[npt.NDArray[np.int64], npt.NDArray[np.uint32]]:
+) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.uint32]]:
     """
     Send global cell index and permutation to corresponding entry in `dest_ranks`.
 
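The legacy_readers.py hunk below replaces a per-cell double loop with a single
reshape/swapaxes when converting legacy DOLFIN's blocked dof layout (all
x-components of a cell, then all y, then all z) into the interleaved xyzxyz
layout. A minimal standalone sketch of that remap; the sizes here are made up
for illustration, and it assumes the uniform number of dofs per cell that the
new assertion in the hunk enforces:

```python
import numpy as np

# Illustrative sizes (not from the library): 2 cells, block size 3,
# 4 nodes per cell, so 12 dofs per cell in the file.
num_cells, bs, dofs_per_node = 2, 3, 4
in_dofmap = np.arange(num_cells * bs * dofs_per_node, dtype=np.int64)

# Vectorized remap from xxxxyyyyzzzz (per cell) to xyzxyz...
nd_dofmap = in_dofmap.reshape(num_cells, bs, dofs_per_node)
mapped_dofmap = np.swapaxes(nd_dofmap, 1, 2).reshape(-1)

# Reference implementation mirroring the removed per-cell loop:
ref = np.empty_like(in_dofmap)
for c in range(num_cells):
    cell = in_dofmap[c * bs * dofs_per_node : (c + 1) * bs * dofs_per_node]
    for k in range(bs):
        for j in range(dofs_per_node):
            ref[c * bs * dofs_per_node + j * bs + k] = cell[dofs_per_node * k + j]
np.testing.assert_array_equal(mapped_dofmap, ref)
```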
diff --git a/src/adios4dolfinx/legacy_readers.py b/src/adios4dolfinx/legacy_readers.py index 7bc64ff..b4441d6 100644 --- a/src/adios4dolfinx/legacy_readers.py +++ b/src/adios4dolfinx/legacy_readers.py @@ -4,8 +4,9 @@ # # SPDX-License-Identifier: MIT +from __future__ import annotations + import pathlib -from typing import Optional from mpi4py import MPI @@ -121,18 +122,15 @@ def read_dofmap_legacy( in_dofmap = in_dofmap.reshape(-1).astype(np.int64) # Map xxxyyyzzz to xyzxyz - mapped_dofmap = np.empty_like(in_dofmap) - for i in range(len(in_offsets) - 1): - pos_begin, pos_end = ( - in_offsets[i] - in_offsets[0], - in_offsets[i + 1] - in_offsets[0], - ) - dofs_i = in_dofmap[pos_begin:pos_end] - assert (pos_end - pos_begin) % bs == 0 - num_dofs_local = int((pos_end - pos_begin) // bs) - for k in range(bs): - for j in range(num_dofs_local): - mapped_dofmap[int(pos_begin + j * bs + k)] = dofs_i[int(num_dofs_local * k + j)] + num_dofs = in_offsets[-1] - in_offsets[0] + assert num_dofs == len(in_dofmap) and (num_dofs) % bs == 0 + num_dofs_per_cell = in_offsets[1:] - in_offsets[:-1] + assert np.allclose( + num_dofs_per_cell, num_dofs_per_cell[0] + ), "Non-uniform number of dofs per cell" + num_cells = len(in_offsets) - 1 + nd_dofmap = in_dofmap.reshape(num_cells, bs, int(num_dofs_per_cell[0] // bs)) + mapped_dofmap = np.swapaxes(nd_dofmap, 1, 2).reshape(-1) # Extract dofmap data global_dofs = np.zeros_like(cells, dtype=np.int64) @@ -249,8 +247,8 @@ def read_mesh_geometry(io: adios2.ADIOS, infile: adios2.Engine, group: str): def read_mesh_from_legacy_h5( - comm: MPI.Intracomm, filename: pathlib.Path, + comm: MPI.Intracomm, group: str, cell_type: str = "tetrahedron", ) -> dolfinx.mesh.Mesh: @@ -313,11 +311,11 @@ def read_mesh_from_legacy_h5( def read_function_from_legacy_h5( - comm: MPI.Intracomm, filename: pathlib.Path, + comm: MPI.Intracomm, u: dolfinx.fem.Function, group: str = "mesh", - step: Optional[int] = None, + step: int | None = None, ): """ Read function from a `h5`-file generated by legacy DOLFIN `HDF5File.write` diff --git a/src/adios4dolfinx/original_checkpoint.py b/src/adios4dolfinx/original_checkpoint.py index c965761..41ac274 100644 --- a/src/adios4dolfinx/original_checkpoint.py +++ b/src/adios4dolfinx/original_checkpoint.py @@ -4,6 +4,8 @@ # # SPDX-License-Identifier: MIT +from __future__ import annotations + from pathlib import Path from mpi4py import MPI @@ -313,8 +315,8 @@ def create_function_data_on_original_mesh(u: dolfinx.fem.Function) -> FunctionDa def write_function_on_input_mesh( - u: dolfinx.fem.Function, filename: Path | str, + u: dolfinx.fem.Function, engine: str = "BP4", mode: adios2.Mode = adios2.Mode.Append, time: float = 0.0, @@ -334,9 +336,9 @@ def write_function_on_input_mesh( function_data = create_function_data_on_original_mesh(u) fname = Path(filename) write_function( + fname, mesh.comm, function_data, - fname, engine, mode, time, @@ -344,11 +346,11 @@ def write_function_on_input_mesh( ) -def write_mesh_input_order(mesh: dolfinx.mesh.Mesh, filename: Path | str, engine: str = "BP4"): +def write_mesh_input_order(filename: Path | str, mesh: dolfinx.mesh.Mesh, engine: str = "BP4"): """ Write mesh to checkpoint file in original input ordering """ mesh_data = create_original_mesh_data(mesh) fname = Path(filename) - write_mesh(mesh.comm, mesh_data, fname, engine, io_name="OriginalMeshWriter") + write_mesh(fname, mesh.comm, mesh_data, engine, io_name="OriginalMeshWriter") diff --git a/src/adios4dolfinx/structures.py b/src/adios4dolfinx/structures.py index 
068987e..cbb9558 100644 --- a/src/adios4dolfinx/structures.py +++ b/src/adios4dolfinx/structures.py @@ -4,9 +4,9 @@ # # SPDX-License-Identifier: MIT +from __future__ import annotations from dataclasses import dataclass -from typing import Tuple import numpy as np import numpy.typing as npt @@ -19,12 +19,12 @@ class MeshData: # 2 dimensional array of node coordinates local_geometry: npt.NDArray[np.floating] - local_geometry_pos: Tuple[int, int] # Insert range on current process for geometry nodes + local_geometry_pos: tuple[int, int] # Insert range on current process for geometry nodes num_nodes_global: int # Number of nodes in global geometry array local_topology: npt.NDArray[np.int64] # 2 dimensional connecitivty array for mesh topology # Insert range on current process for topology - local_topology_pos: Tuple[int, int] + local_topology_pos: tuple[int, int] num_cells_global: int # NUmber of cells in global topology cell_type: str @@ -35,13 +35,13 @@ class MeshData: @dataclass class FunctionData: cell_permutations: npt.NDArray[np.uint32] # Cell permutations for dofmap - local_cell_range: Tuple[int, int] # Range of cells on current process + local_cell_range: tuple[int, int] # Range of cells on current process num_cells_global: int # Number of cells in global topology dofmap_array: npt.NDArray[np.int64] # Local function dofmap (using global indices) dofmap_offsets: npt.NDArray[np.int64] # Global dofmap offsets - dofmap_range: Tuple[int, int] # Range of dofmap on current process + dofmap_range: tuple[int, int] # Range of dofmap on current process global_dofs_in_dofmap: int # Number of entries in global dofmap values: npt.NDArray[np.floating] # Local function values - dof_range: Tuple[int, int] # Range of local function values + dof_range: tuple[int, int] # Range of local function values num_dofs_global: int # Number of global function values name: str # Name of function diff --git a/src/adios4dolfinx/utils.py b/src/adios4dolfinx/utils.py index 7b13bc8..8570e2f 100644 --- a/src/adios4dolfinx/utils.py +++ b/src/adios4dolfinx/utils.py @@ -8,6 +8,13 @@ Vectorized numpy operations used internally in adios4dolfinx """ +from __future__ import annotations + +from mpi4py import MPI + +import dolfinx +import numpy as np +import numpy.typing as npt __all__ = [ "compute_local_range", @@ -17,16 +24,9 @@ "compute_insert_position", "unroll_insert_position", ] -from typing import Tuple, Union - -from mpi4py import MPI - -import dolfinx -import numpy as np -import numpy.typing as npt -valid_function_types = Union[np.float32, np.float64, np.complex64, np.complex128] -valid_real_types = Union[np.float32, np.float64] +valid_function_types = np.float32 | np.float64 | np.complex64 | np.complex128 +valid_real_types = np.float32 | np.float64 def compute_insert_position( @@ -147,7 +147,7 @@ def unroll_dofmap(dofs: npt.NDArray[np.int32], bs: int) -> npt.NDArray[np.int32] def compute_dofmap_pos( V: dolfinx.fem.FunctionSpace, -) -> Tuple[npt.NDArray[np.int32], npt.NDArray[np.int32]]: +) -> tuple[npt.NDArray[np.int32], npt.NDArray[np.int32]]: """ Compute a map from each owned dof in the dofmap to a single cell owned by the process, and the relative position of the dof. 
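The readers in these patches split every global ADIOS2 array into one
contiguous slice per rank before calling `SetSelection`, via the
`compute_local_range` helper exported above. A hypothetical serial stand-in to
illustrate the idea; the exact remainder handling here is an assumption, and
the real helper in `adios4dolfinx.utils` takes an MPI communicator rather than
explicit `rank`/`size` arguments:

```python
def compute_local_range_sketch(rank: int, size: int, n_global: int) -> tuple[int, int]:
    # Split [0, n_global) into `size` contiguous chunks; here the first
    # n_global % size ranks get one extra entry (assumed convention).
    n, r = divmod(n_global, size)
    if rank < r:
        return rank * (n + 1), (rank + 1) * (n + 1)
    return r + rank * n, r + (rank + 1) * n

# The per-rank chunks tile the global index range exactly once:
assert [compute_local_range_sketch(q, 4, 10) for q in range(4)] == [
    (0, 3), (3, 6), (6, 8), (8, 10)
]
```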
diff --git a/src/adios4dolfinx/writers.py b/src/adios4dolfinx/writers.py
index a83dc34..8f5ac01 100644
--- a/src/adios4dolfinx/writers.py
+++ b/src/adios4dolfinx/writers.py
@@ -19,9 +19,9 @@
 
 
 def write_mesh(
+    filename: Path,
     comm: MPI.Intracomm,
     mesh: MeshData,
-    filename: Path,
     engine: str = "BP4",
     mode: adios2.Mode = adios2.Mode.Write,
     io_name: str = "MeshWriter",
@@ -83,9 +83,9 @@ def write_mesh(
 
 
 def write_function(
+    filename: Path,
     comm: MPI.Intracomm,
     u: FunctionData,
-    filename: Path,
     engine: str = "BP4",
     mode: adios2.Mode = adios2.Mode.Append,
     time: float = 0.0,
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..eadbc47
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,154 @@
+import pathlib
+
+from mpi4py import MPI
+
+import dolfinx
+import ipyparallel as ipp
+import numpy as np
+import numpy.typing
+import pytest
+
+import adios4dolfinx
+
+
+@pytest.fixture(scope="module")
+def cluster():
+    cluster = ipp.Cluster(engines="mpi", n=2)
+    rc = cluster.start_and_connect_sync()
+    yield rc
+    cluster.stop_cluster_sync()
+
+
+@pytest.fixture(scope="function")
+def write_function():
+    def _write_function(mesh, el, f, dtype, name="uh", append: bool = False) -> str:
+        V = dolfinx.fem.functionspace(mesh, el)
+        uh = dolfinx.fem.Function(V, dtype=dtype)
+        uh.interpolate(f)
+        uh.name = name
+        el_hash = (
+            V.element.signature()
+            .replace(" ", "")
+            .replace(",", "")
+            .replace("(", "")
+            .replace(")", "")
+            .replace("[", "")
+            .replace("]", "")
+        )
+
+        file_hash = f"{el_hash}_{np.dtype(dtype).name}"
+        filename = pathlib.Path(f"output/mesh_{file_hash}.bp")
+        if mesh.comm.size != 1:
+            if not append:
+                adios4dolfinx.write_mesh(filename, mesh)
+            adios4dolfinx.write_function(filename, uh, time=0.0)
+        else:
+            if MPI.COMM_WORLD.rank == 0:
+                if not append:
+                    adios4dolfinx.write_mesh(filename, mesh)
+                adios4dolfinx.write_function(filename, uh, time=0.0)
+
+        return file_hash
+
+    return _write_function
+
+
+@pytest.fixture(scope="function")
+def read_function():
+    def _read_function(comm, el, f, hash, dtype, name="uh"):
+        filename = f"output/mesh_{hash}.bp"
+        engine = "BP4"
+        mesh = adios4dolfinx.read_mesh(filename, comm, engine, dolfinx.mesh.GhostMode.shared_facet)
+        V = dolfinx.fem.functionspace(mesh, el)
+        v = dolfinx.fem.Function(V, dtype=dtype)
+        v.name = name
+        adios4dolfinx.read_function(filename, v, engine)
+        v_ex = dolfinx.fem.Function(V, dtype=dtype)
+        v_ex.interpolate(f)
+
+        res = np.finfo(dtype).resolution
+        np.testing.assert_allclose(v.x.array, v_ex.x.array, atol=10 * res, rtol=10 * res)
+
+    return _read_function
+
+
+@pytest.fixture(scope="function")
+def get_dtype():
+    def _get_dtype(in_dtype: np.dtype, is_complex: bool):
+        dtype: numpy.typing.DTypeLike
+        if in_dtype == np.float32:
+            if is_complex:
+                dtype = np.complex64
+            else:
+                dtype = np.float32
+        elif in_dtype == np.float64:
+            if is_complex:
+                dtype = np.complex128
+            else:
+                dtype = np.float64
+        else:
+            raise ValueError("Unsupported dtype")
+        return dtype
+
+    return _get_dtype
+
+
+@pytest.fixture(scope="function")
+def write_function_time_dep():
+    def _write_function_time_dep(mesh, el, f0, f1, t0, t1, dtype) -> str:
+        V = dolfinx.fem.functionspace(mesh, el)
+        uh = dolfinx.fem.Function(V, dtype=dtype)
+        uh.interpolate(f0)
+        el_hash = (
+            V.element.signature()
+            .replace(" ", "")
+            .replace(",", "")
+            .replace("(", "")
+            .replace(")", "")
+            .replace("[", "")
+            .replace("]", "")
+        )
+        file_hash = f"{el_hash}_{np.dtype(dtype).name}"
+        filename = pathlib.Path(f"output/mesh_{file_hash}.bp")
+        if 
mesh.comm.size != 1: + adios4dolfinx.write_mesh(filename, mesh) + adios4dolfinx.write_function(filename, uh, time=t0) + uh.interpolate(f1) + adios4dolfinx.write_function(filename, uh, time=t1) + + else: + if MPI.COMM_WORLD.rank == 0: + adios4dolfinx.write_mesh(filename, mesh) + adios4dolfinx.write_function(filename, uh, time=t0) + uh.interpolate(f1) + adios4dolfinx.write_function(filename, uh, time=t1) + + return file_hash + + return _write_function_time_dep + + +@pytest.fixture(scope="function") +def read_function_time_dep(): + def _read_function_time_dep(comm, el, f0, f1, t0, t1, hash, dtype): + filename = f"output/mesh_{hash}.bp" + engine = "BP4" + mesh = adios4dolfinx.read_mesh(filename, comm, engine, dolfinx.mesh.GhostMode.shared_facet) + V = dolfinx.fem.functionspace(mesh, el) + v = dolfinx.fem.Function(V, dtype=dtype) + + adios4dolfinx.read_function(filename, v, engine, time=t1) + v_ex = dolfinx.fem.Function(V, dtype=dtype) + v_ex.interpolate(f1) + + res = np.finfo(dtype).resolution + assert np.allclose(v.x.array, v_ex.x.array, atol=10 * res, rtol=10 * res) + + adios4dolfinx.read_function(filename, v, engine, time=t0) + v_ex = dolfinx.fem.Function(V, dtype=dtype) + v_ex.interpolate(f0) + + res = np.finfo(dtype).resolution + assert np.allclose(v.x.array, v_ex.x.array, atol=10 * res, rtol=10 * res) + + return _read_function_time_dep diff --git a/tests/test_checkpointing.py b/tests/test_checkpointing.py index 23c5508..153fb3f 100644 --- a/tests/test_checkpointing.py +++ b/tests/test_checkpointing.py @@ -8,14 +8,6 @@ import numpy as np import pytest -from .test_utils import ( - get_dtype, - read_function, - read_function_time_dep, - write_function, - write_function_time_dep, -) - dtypes = [np.float64, np.float32] # Mesh geometry dtypes write_comm = [MPI.COMM_SELF, MPI.COMM_WORLD] # Communicators for creating mesh @@ -51,7 +43,9 @@ def mesh_3D(request): @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_P_2D(read_comm, family, degree, is_complex, mesh_2D): +def test_read_write_P_2D( + read_comm, family, degree, is_complex, mesh_2D, get_dtype, write_function, read_function +): mesh = mesh_2D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) @@ -82,7 +76,9 @@ def f(x): @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_P_3D(read_comm, family, degree, is_complex, mesh_3D): +def test_read_write_P_3D( + read_comm, family, degree, is_complex, mesh_3D, get_dtype, write_function, read_function +): mesh = mesh_3D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element( @@ -113,7 +109,16 @@ def f(x): @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_P_2D_time(read_comm, family, degree, is_complex, mesh_2D): +def test_read_write_P_2D_time( + read_comm, + family, + degree, + is_complex, + mesh_2D, + get_dtype, + write_function_time_dep, + read_function_time_dep, +): mesh = mesh_2D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) @@ -155,7 +160,16 @@ def f1(x): @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def 
test_read_write_P_3D_time(read_comm, family, degree, is_complex, mesh_3D): +def test_read_write_P_3D_time( + read_comm, + family, + degree, + is_complex, + mesh_3D, + get_dtype, + write_function_time_dep, + read_function_time_dep, +): mesh = mesh_3D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element( diff --git a/tests/test_checkpointing_vector.py b/tests/test_checkpointing_vector.py index a22d95e..869647c 100644 --- a/tests/test_checkpointing_vector.py +++ b/tests/test_checkpointing_vector.py @@ -8,8 +8,6 @@ import numpy as np import pytest -from .test_utils import get_dtype, read_function, write_function - dtypes = [np.float64, np.float32] # Mesh geometry dtypes write_comm = [MPI.COMM_SELF, MPI.COMM_WORLD] # Communicators for creating mesh @@ -53,7 +51,9 @@ def non_simplex_mesh_3D(request): @pytest.mark.parametrize("family", ["N1curl", "RT"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_2D(read_comm, family, degree, is_complex, simplex_mesh_2D): +def test_read_write_2D( + read_comm, family, degree, is_complex, simplex_mesh_2D, get_dtype, write_function, read_function +): mesh = simplex_mesh_2D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) @@ -76,7 +76,9 @@ def f(x): @pytest.mark.parametrize("family", ["N1curl", "RT"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_3D(read_comm, family, degree, is_complex, simplex_mesh_3D): +def test_read_write_3D( + read_comm, family, degree, is_complex, simplex_mesh_3D, get_dtype, write_function, read_function +): mesh = simplex_mesh_3D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) @@ -100,7 +102,16 @@ def f(x): @pytest.mark.parametrize("family", ["RTCF"]) @pytest.mark.parametrize("degree", [1, 2, 3]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_2D_quad(read_comm, family, degree, is_complex, non_simplex_mesh_2D): +def test_read_write_2D_quad( + read_comm, + family, + degree, + is_complex, + non_simplex_mesh_2D, + get_dtype, + write_function, + read_function, +): mesh = non_simplex_mesh_2D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) @@ -123,7 +134,16 @@ def f(x): @pytest.mark.parametrize("family", ["NCF"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_hex(read_comm, family, degree, is_complex, non_simplex_mesh_3D): +def test_read_write_hex( + read_comm, + family, + degree, + is_complex, + non_simplex_mesh_3D, + get_dtype, + write_function, + read_function, +): mesh = non_simplex_mesh_3D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) @@ -147,7 +167,16 @@ def f(x): @pytest.mark.parametrize("family", ["RTCF"]) @pytest.mark.parametrize("degree", [1, 2, 3]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_read_write_multiple(read_comm, family, degree, is_complex, non_simplex_mesh_2D): +def test_read_write_multiple( + read_comm, + family, + degree, + is_complex, + non_simplex_mesh_2D, + get_dtype, + write_function, + read_function, +): mesh = non_simplex_mesh_2D f_dtype = 
get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) diff --git a/tests/test_legacy_readers.py b/tests/test_legacy_readers.py index 7f7069c..dd41bac 100644 --- a/tests/test_legacy_readers.py +++ b/tests/test_legacy_readers.py @@ -28,7 +28,7 @@ def test_legacy_mesh(): path = (pathlib.Path("legacy") / "mesh.h5").absolute() if not path.exists(): pytest.skip(f"{path} does not exist") - mesh = read_mesh_from_legacy_h5(comm=comm, filename=path, group="/mesh") + mesh = read_mesh_from_legacy_h5(filename=path, comm=comm, group="/mesh") assert mesh.topology.dim == 3 volume = mesh.comm.allreduce( dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * ufl.dx(domain=mesh))), @@ -51,7 +51,7 @@ def test_read_legacy_mesh_from_checkpoint(): filename = (pathlib.Path("legacy") / "mesh_checkpoint.h5").absolute() if not filename.exists(): pytest.skip(f"{filename} does not exist") - mesh = read_mesh_from_legacy_h5(comm=comm, filename=filename, group="/Mesh/mesh") + mesh = read_mesh_from_legacy_h5(filename=filename, comm=comm, group="/Mesh/mesh") assert mesh.topology.dim == 3 volume = mesh.comm.allreduce( dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * ufl.dx(domain=mesh))), @@ -74,7 +74,7 @@ def test_legacy_function(): path = (pathlib.Path("legacy") / "mesh.h5").absolute() if not path.exists(): pytest.skip(f"{path} does not exist") - mesh = read_mesh_from_legacy_h5(comm, path, "/mesh") + mesh = read_mesh_from_legacy_h5(path, comm, "/mesh") V = dolfinx.fem.functionspace(mesh, ("DG", 2)) u = ufl.TrialFunction(V) v = ufl.TestFunction(V) @@ -88,7 +88,7 @@ def test_legacy_function(): problem.solve() u_in = dolfinx.fem.Function(V) - read_function_from_legacy_h5(mesh.comm, path, u_in, group="v") + read_function_from_legacy_h5(path, mesh.comm, u_in, group="v") np.testing.assert_allclose(uh.x.array, u_in.x.array, atol=1e-14) W = dolfinx.fem.functionspace(mesh, ("DG", 2, (mesh.geometry.dim,))) @@ -96,7 +96,7 @@ def test_legacy_function(): wh.interpolate(lambda x: (x[0], 3 * x[2], 7 * x[1])) w_in = dolfinx.fem.Function(W) - read_function_from_legacy_h5(mesh.comm, path, w_in, group="w") + read_function_from_legacy_h5(path, mesh.comm, w_in, group="w") np.testing.assert_allclose(wh.x.array, w_in.x.array, atol=1e-14) @@ -106,7 +106,7 @@ def test_read_legacy_function_from_checkpoint(): path = (pathlib.Path("legacy") / "mesh_checkpoint.h5").absolute() if not path.exists(): pytest.skip(f"{path} does not exist") - mesh = read_mesh_from_legacy_h5(comm, path, "/Mesh/mesh") + mesh = read_mesh_from_legacy_h5(path, comm, "/Mesh/mesh") V = dolfinx.fem.functionspace(mesh, ("DG", 2)) u = ufl.TrialFunction(V) @@ -121,12 +121,12 @@ def test_read_legacy_function_from_checkpoint(): problem.solve() u_in = dolfinx.fem.Function(V) - read_function_from_legacy_h5(mesh.comm, path, u_in, group="v", step=0) + read_function_from_legacy_h5(path, mesh.comm, u_in, group="v", step=0) assert np.allclose(uh.x.array, u_in.x.array) # Check second step uh.interpolate(lambda x: x[0]) - read_function_from_legacy_h5(mesh.comm, path, u_in, group="v", step=1) + read_function_from_legacy_h5(path, mesh.comm, u_in, group="v", step=1) assert np.allclose(uh.x.array, u_in.x.array) W = dolfinx.fem.functionspace(mesh, ("DG", 2, (mesh.geometry.dim,))) @@ -134,11 +134,11 @@ def test_read_legacy_function_from_checkpoint(): wh.interpolate(lambda x: (x[0], 3 * x[2], 7 * x[1])) w_in = dolfinx.fem.Function(W) - read_function_from_legacy_h5(mesh.comm, path, w_in, group="w", step=0) + read_function_from_legacy_h5(path, 
mesh.comm, w_in, group="w", step=0) np.testing.assert_allclose(wh.x.array, w_in.x.array, atol=1e-14) wh.interpolate(lambda x: np.vstack((x[0], 0 * x[0], x[1]))) - read_function_from_legacy_h5(mesh.comm, path, w_in, group="w", step=1) + read_function_from_legacy_h5(path, mesh.comm, w_in, group="w", step=1) np.testing.assert_allclose(wh.x.array, w_in.x.array, atol=1e-14) @@ -149,7 +149,7 @@ def test_adios4dolfinx_legacy(): pytest.skip(f"{path} does not exist") el = ("N1curl", 3) - mesh = read_mesh(comm, path, "BP4", dolfinx.mesh.GhostMode.shared_facet) + mesh = read_mesh(path, comm, "BP4", dolfinx.mesh.GhostMode.shared_facet) def f(x): values = np.zeros((2, x.shape[1]), dtype=np.float64) @@ -159,7 +159,7 @@ def f(x): V = dolfinx.fem.functionspace(mesh, el) u = dolfinx.fem.Function(V) - read_function(u, path, engine="BP4", legacy=True) + read_function(path, u, engine="BP4", legacy=True) u_ex = dolfinx.fem.Function(V) u_ex.interpolate(f) diff --git a/tests/test_mesh_writer.py b/tests/test_mesh_writer.py index e234e13..26b5483 100644 --- a/tests/test_mesh_writer.py +++ b/tests/test_mesh_writer.py @@ -21,7 +21,7 @@ def test_mesh_read_writer(encoder, suffix, ghost_mode): mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, N, N, N, ghost_mode=ghost_mode) start = time.perf_counter() - write_mesh(mesh, file.with_suffix(suffix), encoder) + write_mesh(file.with_suffix(suffix), mesh, encoder) end = time.perf_counter() print(f"Write ADIOS2 mesh: {end-start}") @@ -34,7 +34,7 @@ def test_mesh_read_writer(encoder, suffix, ghost_mode): mesh.comm.Barrier() start = time.perf_counter() - mesh_adios = read_mesh(MPI.COMM_WORLD, file.with_suffix(suffix), encoder, ghost_mode) + mesh_adios = read_mesh(file.with_suffix(suffix), MPI.COMM_WORLD, encoder, ghost_mode) end = time.perf_counter() print(f"Read ADIOS2 mesh: {end-start}") mesh.comm.Barrier() diff --git a/tests/test_meshtags.py b/tests/test_meshtags.py index e1f4535..bdbe768 100644 --- a/tests/test_meshtags.py +++ b/tests/test_meshtags.py @@ -1,6 +1,7 @@ +from __future__ import annotations + import itertools from collections import ChainMap -from typing import Dict, List, Tuple, Union from mpi4py import MPI @@ -12,21 +13,21 @@ import adios4dolfinx root = 0 -dtypes: List["str"] = ["float64", "float32"] # Mesh geometry dtypes -write_comm: List[MPI.Intracomm] = [ +dtypes: list["str"] = ["float64", "float32"] # Mesh geometry dtypes +write_comm: list[MPI.Intracomm] = [ MPI.COMM_SELF, MPI.COMM_WORLD, ] # Communicators for creating mesh -read_modes: List[dolfinx.mesh.GhostMode] = [ +read_modes: list[dolfinx.mesh.GhostMode] = [ dolfinx.mesh.GhostMode.none, dolfinx.mesh.GhostMode.shared_facet, ] # Cell types of different dimensions -two_dimensional_cell_types: List[dolfinx.mesh.CellType] = [ +two_dimensional_cell_types: list[dolfinx.mesh.CellType] = [ dolfinx.mesh.CellType.triangle, dolfinx.mesh.CellType.quadrilateral, ] -three_dimensional_cell_types: List[dolfinx.mesh.CellType] = [ +three_dimensional_cell_types: list[dolfinx.mesh.CellType] = [ dolfinx.mesh.CellType.tetrahedron, dolfinx.mesh.CellType.hexahedron, ] @@ -66,7 +67,7 @@ def generate_reference_map( meshtag: dolfinx.mesh.MeshTags, comm: MPI.Intracomm, root: int, -) -> Union[None, Dict[str, Tuple[int, npt.NDArray]]]: +) -> None | dict[str, tuple[int, npt.NDArray]]: """ Helper function to generate map from meshtag value to its corresponding index and midpoint. 
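Each meshtag test below follows the same write/read round trip with the new
filename-first signatures introduced in this patch. A condensed sketch of that
pattern; the unit-square mesh, the constant tag value and the
`"checkpoint.bp"` filename are made up for illustration:

```python
from mpi4py import MPI

import dolfinx
import numpy as np

import adios4dolfinx

# Illustrative mesh, with every local cell tagged with the value 1
mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 4, 4)
tdim = mesh.topology.dim
num_cells = mesh.topology.index_map(tdim).size_local
ct = dolfinx.mesh.meshtags(
    mesh, tdim, np.arange(num_cells, dtype=np.int32), np.full(num_cells, 1, dtype=np.int32)
)
ct.name = "cell_tags"

# Write, then read back, with the reordered (filename-first) signatures
adios4dolfinx.write_mesh("checkpoint.bp", mesh, engine="BP4")
adios4dolfinx.write_meshtags("checkpoint.bp", mesh, ct, engine="BP4")
new_mesh = adios4dolfinx.read_mesh(
    "checkpoint.bp", MPI.COMM_WORLD, engine="BP4",
    ghost_mode=dolfinx.mesh.GhostMode.shared_facet,
)
new_ct = adios4dolfinx.read_meshtags("checkpoint.bp", new_mesh, "cell_tags", engine="BP4")
```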
@@ -104,10 +105,10 @@ def test_checkpointing_meshtags_1D(mesh_1D, read_comm, read_mode): # If mesh communicator is more than a self communicator or serial write on all processes. # If serial or self communicator, only write on root rank if mesh.comm.size != 1: - adios4dolfinx.write_mesh(mesh, filename, engine="BP4") + adios4dolfinx.write_mesh(filename, mesh, engine="BP4") else: if MPI.COMM_WORLD.rank == root: - adios4dolfinx.write_mesh(mesh, filename, engine="BP4") + adios4dolfinx.write_mesh(filename, mesh, engine="BP4") # Create meshtags labeling each entity (of each co-dimension) with a # unique number (their initial global index). @@ -137,7 +138,7 @@ def test_checkpointing_meshtags_1D(mesh_1D, read_comm, read_mode): MPI.COMM_WORLD.Barrier() # Read mesh on testing communicator - new_mesh = adios4dolfinx.read_mesh(read_comm, filename, engine="BP4", ghost_mode=read_mode) + new_mesh = adios4dolfinx.read_mesh(filename, read_comm, engine="BP4", ghost_mode=read_mode) for dim in range(new_mesh.topology.dim + 1): # Read meshtags on all processes if testing communicator has multiple ranks # else read on root 0 @@ -171,10 +172,10 @@ def test_checkpointing_meshtags_2D(mesh_2D, read_comm, read_mode): hash = f"{mesh.comm.size}_{mesh.topology.cell_name()}_{mesh.geometry.x.dtype}" filename = f"meshtags_1D_{hash}.bp" if mesh.comm.size != 1: - adios4dolfinx.write_mesh(mesh, filename, engine="BP4") + adios4dolfinx.write_mesh(filename, mesh, engine="BP4") else: if MPI.COMM_WORLD.rank == root: - adios4dolfinx.write_mesh(mesh, filename, engine="BP4") + adios4dolfinx.write_mesh(filename, mesh, engine="BP4") org_maps = [] for dim in range(mesh.topology.dim + 1): @@ -196,7 +197,7 @@ def test_checkpointing_meshtags_2D(mesh_2D, read_comm, read_mode): del ft del mesh MPI.COMM_WORLD.Barrier() - new_mesh = adios4dolfinx.read_mesh(read_comm, filename, engine="BP4", ghost_mode=read_mode) + new_mesh = adios4dolfinx.read_mesh(filename, read_comm, engine="BP4", ghost_mode=read_mode) for dim in range(new_mesh.topology.dim + 1): if read_comm.size != 1: new_ft = adios4dolfinx.read_meshtags( @@ -225,10 +226,10 @@ def test_checkpointing_meshtags_3D(mesh_3D, read_comm, read_mode): hash = f"{mesh.comm.size}_{mesh.topology.cell_name()}_{mesh.geometry.x.dtype}" filename = f"meshtags_1D_{hash}.bp" if mesh.comm.size != 1: - adios4dolfinx.write_mesh(mesh, filename, engine="BP4") + adios4dolfinx.write_mesh(filename, mesh, engine="BP4") else: if MPI.COMM_WORLD.rank == root: - adios4dolfinx.write_mesh(mesh, filename, engine="BP4") + adios4dolfinx.write_mesh(filename, mesh, engine="BP4") org_maps = [] for dim in range(mesh.topology.dim + 1): @@ -252,7 +253,7 @@ def test_checkpointing_meshtags_3D(mesh_3D, read_comm, read_mode): del mesh MPI.COMM_WORLD.Barrier() - new_mesh = adios4dolfinx.read_mesh(read_comm, filename, engine="BP4", ghost_mode=read_mode) + new_mesh = adios4dolfinx.read_mesh(filename, read_comm, engine="BP4", ghost_mode=read_mode) for dim in range(new_mesh.topology.dim + 1): if read_comm.size != 1: new_ft = adios4dolfinx.read_meshtags( diff --git a/tests/test_original_checkpoint.py b/tests/test_original_checkpoint.py index 1fc86c6..35c40a9 100644 --- a/tests/test_original_checkpoint.py +++ b/tests/test_original_checkpoint.py @@ -1,20 +1,19 @@ +from __future__ import annotations + import itertools +from collections.abc import Callable from pathlib import Path -from typing import Callable from mpi4py import MPI import basix import basix.ufl import dolfinx -import ipyparallel as ipp import numpy as np import pytest import 
adios4dolfinx -from .test_utils import get_dtype - dtypes = [np.float64, np.float32] # Mesh geometry dtypes two_dimensional_cell_types = [ @@ -112,15 +111,7 @@ def create_3D_mesh(request): return fname -@pytest.fixture(scope="module") -def cluster(): - cluster = ipp.Cluster(engines="mpi", n=2) - rc = cluster.start_and_connect_sync() - yield rc - cluster.stop_cluster_sync() - - -def write_function( +def write_function_original( write_mesh: bool, mesh: dolfinx.mesh.Mesh, el: basix.ufl._ElementBase, @@ -147,12 +138,12 @@ def write_function( filename = Path(f"output/mesh_{file_hash}.bp") if write_mesh: - adios4dolfinx.write_mesh_input_order(mesh, filename) - adios4dolfinx.write_function_on_input_mesh(uh, filename, time=0.0) + adios4dolfinx.write_mesh_input_order(filename, mesh) + adios4dolfinx.write_function_on_input_mesh(filename, uh, time=0.0) return filename -def read_function( +def read_function_original( mesh_fname: Path, u_fname: Path, u_name: str, @@ -176,7 +167,7 @@ def read_function( mesh = xdmf.read_mesh() elif mesh_fname.suffix == ".bp": mesh = adios4dolfinx.read_mesh( - MPI.COMM_WORLD, mesh_fname, "BP4", dolfinx.mesh.GhostMode.shared_facet + mesh_fname, MPI.COMM_WORLD, "BP4", dolfinx.mesh.GhostMode.shared_facet ) el = basix.ufl.element( family, @@ -189,7 +180,7 @@ def read_function( V = dolfinx.fem.functionspace(mesh, el) u = dolfinx.fem.Function(V, name=u_name, dtype=u_dtype) - adios4dolfinx.read_function(u, u_fname, time=0.0) + adios4dolfinx.read_function(u_fname, u, time=0.0) MPI.COMM_WORLD.Barrier() u_ex = dolfinx.fem.Function(V, name="exact", dtype=u_dtype) @@ -238,8 +229,8 @@ def write_function_vector( filename = Path(f"output/mesh_{file_hash}.bp") if write_mesh: - adios4dolfinx.write_mesh_input_order(mesh, filename) - adios4dolfinx.write_function_on_input_mesh(uh, filename, time=0.0) + adios4dolfinx.write_mesh_input_order(filename, mesh) + adios4dolfinx.write_function_on_input_mesh(filename, uh, time=0.0) return filename @@ -261,13 +252,13 @@ def read_function_vector( mesh = xdmf.read_mesh() elif mesh_fname.suffix == ".bp": mesh = adios4dolfinx.read_mesh( - MPI.COMM_WORLD, mesh_fname, "BP4", dolfinx.mesh.GhostMode.shared_facet + mesh_fname, MPI.COMM_WORLD, "BP4", dolfinx.mesh.GhostMode.shared_facet ) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) V = dolfinx.fem.functionspace(mesh, el) u = dolfinx.fem.Function(V, name=u_name, dtype=u_dtype) - adios4dolfinx.read_function(u, u_fname, time=0.0) + adios4dolfinx.read_function(u_fname, u, time=0.0) MPI.COMM_WORLD.Barrier() u_ex = dolfinx.fem.Function(V, name="exact", dtype=u_dtype) @@ -282,7 +273,9 @@ def read_function_vector( @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("write_mesh", [True, False]) -def test_read_write_P_2D(write_mesh, family, degree, is_complex, create_2D_mesh, cluster): +def test_read_write_P_2D( + write_mesh, family, degree, is_complex, create_2D_mesh, cluster, get_dtype +): fname = create_2D_mesh with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "r") as xdmf: mesh = xdmf.read_mesh() @@ -306,14 +299,15 @@ def f(x): values[1] += 2j * x[0] return values - hash = write_function(write_mesh, mesh, el, f, f_dtype, "u_original") + hash = write_function_original(write_mesh, mesh, el, f, f_dtype, "u_original") if write_mesh: mesh_fname = fname else: mesh_fname = hash + query = cluster[:].apply_async( - read_function, mesh_fname, hash, "u_original", family, degree, f, f_dtype + read_function_original, mesh_fname, hash, 
"u_original", family, degree, f, f_dtype ) query.wait() assert query.successful(), query.error @@ -324,7 +318,9 @@ def f(x): @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("write_mesh", [True, False]) -def test_read_write_P_3D(write_mesh, family, degree, is_complex, create_3D_mesh, cluster): +def test_read_write_P_3D( + write_mesh, family, degree, is_complex, create_3D_mesh, cluster, get_dtype +): fname = create_3D_mesh with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "r") as xdmf: mesh = xdmf.read_mesh() @@ -348,7 +344,7 @@ def f(x): values[2] += 2j return values - hash = write_function(write_mesh, mesh, el, f, f_dtype, "u_original") + hash = write_function_original(write_mesh, mesh, el, f, f_dtype, "u_original") MPI.COMM_WORLD.Barrier() if write_mesh: @@ -357,7 +353,7 @@ def f(x): mesh_fname = hash query = cluster[:].apply_async( - read_function, mesh_fname, hash, "u_original", family, degree, f, f_dtype + read_function_original, mesh_fname, hash, "u_original", family, degree, f, f_dtype ) query.wait() assert query.successful(), query.error @@ -369,7 +365,7 @@ def f(x): @pytest.mark.parametrize("family", ["N1curl", "RT"]) @pytest.mark.parametrize("degree", [1, 4]) def test_read_write_2D_vector_simplex( - write_mesh, family, degree, is_complex, create_simplex_mesh_2D, cluster + write_mesh, family, degree, is_complex, create_simplex_mesh_2D, cluster, get_dtype ): fname = create_simplex_mesh_2D @@ -413,7 +409,7 @@ def f(x): @pytest.mark.parametrize("family", ["N1curl", "RT"]) @pytest.mark.parametrize("degree", [1, 4]) def test_read_write_3D_vector_simplex( - write_mesh, family, degree, is_complex, create_simplex_mesh_3D, cluster + write_mesh, family, degree, is_complex, create_simplex_mesh_3D, cluster, get_dtype ): fname = create_simplex_mesh_3D @@ -458,7 +454,7 @@ def f(x): @pytest.mark.parametrize("family", ["RTCF"]) @pytest.mark.parametrize("degree", [1, 2, 3]) def test_read_write_2D_vector_non_simplex( - write_mesh, family, degree, is_complex, create_non_simplex_mesh_2D, cluster + write_mesh, family, degree, is_complex, create_non_simplex_mesh_2D, cluster, get_dtype ): fname = create_non_simplex_mesh_2D @@ -502,7 +498,7 @@ def f(x): @pytest.mark.parametrize("family", ["NCF"]) @pytest.mark.parametrize("degree", [1, 4]) def test_read_write_3D_vector_non_simplex( - write_mesh, family, degree, is_complex, create_non_simplex_mesh_3D, cluster + write_mesh, family, degree, is_complex, create_non_simplex_mesh_3D, cluster, get_dtype ): fname = create_non_simplex_mesh_3D diff --git a/tests/test_utils.py b/tests/test_utils.py deleted file mode 100644 index 601a98c..0000000 --- a/tests/test_utils.py +++ /dev/null @@ -1,124 +0,0 @@ -import pathlib - -from mpi4py import MPI - -import dolfinx -import numpy as np -import numpy.typing - -import adios4dolfinx - - -def write_function(mesh, el, f, dtype, name="uh", append: bool = False) -> str: - V = dolfinx.fem.functionspace(mesh, el) - uh = dolfinx.fem.Function(V, dtype=dtype) - uh.interpolate(f) - uh.name = name - el_hash = ( - V.element.signature() - .replace(" ", "") - .replace(",", "") - .replace("(", "") - .replace(")", "") - .replace("[", "") - .replace("]", "") - ) - - file_hash = f"{el_hash}_{np.dtype(dtype).name}" - filename = pathlib.Path(f"output/mesh_{file_hash}.bp") - if mesh.comm.size != 1: - if not append: - adios4dolfinx.write_mesh(mesh, filename) - adios4dolfinx.write_function(uh, filename, time=0.0) - else: - if MPI.COMM_WORLD.rank == 0: - if not append: - 
adios4dolfinx.write_mesh(mesh, filename) - adios4dolfinx.write_function(uh, filename, time=0.0) - - return file_hash - - -def read_function(comm, el, f, hash, dtype, name="uh"): - filename = f"output/mesh_{hash}.bp" - engine = "BP4" - mesh = adios4dolfinx.read_mesh(comm, filename, engine, dolfinx.mesh.GhostMode.shared_facet) - V = dolfinx.fem.functionspace(mesh, el) - v = dolfinx.fem.Function(V, dtype=dtype) - v.name = name - adios4dolfinx.read_function(v, filename, engine) - v_ex = dolfinx.fem.Function(V, dtype=dtype) - v_ex.interpolate(f) - - res = np.finfo(dtype).resolution - np.testing.assert_allclose(v.x.array, v_ex.x.array, atol=10 * res, rtol=10 * res) - - -def get_dtype(in_dtype: np.dtype, is_complex: bool): - dtype: numpy.typing.DTypeLike - if in_dtype == np.float32: - if is_complex: - dtype = np.complex64 - else: - dtype = np.float32 - elif in_dtype == np.float64: - if is_complex: - dtype = np.complex128 - else: - dtype = np.float64 - else: - raise ValueError("Unsuported dtype") - return dtype - - -def write_function_time_dep(mesh, el, f0, f1, t0, t1, dtype) -> str: - V = dolfinx.fem.functionspace(mesh, el) - uh = dolfinx.fem.Function(V, dtype=dtype) - uh.interpolate(f0) - el_hash = ( - V.element.signature() - .replace(" ", "") - .replace(",", "") - .replace("(", "") - .replace(")", "") - .replace("[", "") - .replace("]", "") - ) - file_hash = f"{el_hash}_{np.dtype(dtype).name}" - filename = pathlib.Path(f"output/mesh_{file_hash}.bp") - if mesh.comm.size != 1: - adios4dolfinx.write_mesh(mesh, filename) - adios4dolfinx.write_function(uh, filename, time=t0) - uh.interpolate(f1) - adios4dolfinx.write_function(uh, filename, time=t1) - - else: - if MPI.COMM_WORLD.rank == 0: - adios4dolfinx.write_mesh(mesh, filename) - adios4dolfinx.write_function(uh, filename, time=t0) - uh.interpolate(f1) - adios4dolfinx.write_function(uh, filename, time=t1) - - return file_hash - - -def read_function_time_dep(comm, el, f0, f1, t0, t1, hash, dtype): - filename = f"output/mesh_{hash}.bp" - engine = "BP4" - mesh = adios4dolfinx.read_mesh(comm, filename, engine, dolfinx.mesh.GhostMode.shared_facet) - V = dolfinx.fem.functionspace(mesh, el) - v = dolfinx.fem.Function(V, dtype=dtype) - - adios4dolfinx.read_function(v, filename, engine, time=t1) - v_ex = dolfinx.fem.Function(V, dtype=dtype) - v_ex.interpolate(f1) - - res = np.finfo(dtype).resolution - assert np.allclose(v.x.array, v_ex.x.array, atol=10 * res, rtol=10 * res) - - adios4dolfinx.read_function(v, filename, engine, time=t0) - v_ex = dolfinx.fem.Function(V, dtype=dtype) - v_ex.interpolate(f0) - - res = np.finfo(dtype).resolution - assert np.allclose(v.x.array, v_ex.x.array, atol=10 * res, rtol=10 * res) From 886bd384599db8bcea721352b3ed234e738bf45a Mon Sep 17 00:00:00 2001 From: Henrik Finsberg Date: Sat, 2 Mar 2024 20:29:49 +0100 Subject: [PATCH 14/49] Add context manager (#74) * Add beginning of context manager * More context mangers * Bugfix * Ruff * More ruff * Mypy * Ruff again * Rename context manager to ADIOSFile and add it in checkpointing as well * Fix docstring --------- Co-authored-by: jorgensd --- src/adios4dolfinx/adios2_helpers.py | 273 +++++++++++++++------------ src/adios4dolfinx/checkpointing.py | 277 +++++++++++++++------------- src/adios4dolfinx/legacy_readers.py | 234 ++++++++++++----------- src/adios4dolfinx/snapshot.py | 55 +++--- src/adios4dolfinx/writers.py | 187 +++++++++---------- 5 files changed, 546 insertions(+), 480 deletions(-) diff --git a/src/adios4dolfinx/adios2_helpers.py b/src/adios4dolfinx/adios2_helpers.py 
index fdac650..5f394a2 100644 --- a/src/adios4dolfinx/adios2_helpers.py +++ b/src/adios4dolfinx/adios2_helpers.py @@ -1,6 +1,8 @@ from __future__ import annotations +from contextlib import contextmanager from pathlib import Path +from typing import NamedTuple from mpi4py import MPI @@ -34,6 +36,29 @@ def resolve_adios_scope(adios2): } +class AdiosFile(NamedTuple): + io: adios2.IO + file: adios2.Engine + + +@contextmanager +def ADIOSFile( + adios: adios2.ADIOS, + filename: Path | str, + engine: str, + mode: adios2.Mode, + io_name: str, +): + io = adios.DeclareIO(io_name) + io.SetEngine(engine) + file = io.Open(str(filename), mode) + try: + yield AdiosFile(io=io, file=file) + finally: + file.Close() + adios.RemoveIO(io_name) + + def read_cell_perms( adios: adios2.ADIOS, comm: MPI.Intracomm, @@ -63,36 +88,38 @@ def read_cell_perms( # Open ADIOS engine io_name = f"{variable=}_reader" - io = adios.DeclareIO(io_name) - io.SetEngine(engine) - infile = io.Open(str(filename), adios2.Mode.Read) - - # Find step that has cell permutation - for i in range(infile.Steps()): - infile.BeginStep() - if variable in io.AvailableVariables().keys(): - break - infile.EndStep() - if variable not in io.AvailableVariables().keys(): - raise KeyError(f"Variable {variable} not found in '{filename}'") - - # Get variable and get global shape - perm_var = io.InquireVariable(variable) - shape = perm_var.Shape() - assert len(shape) == 1 - - # Get local selection - local_cell_range = compute_local_range(comm, num_cells_global) - perm_var.SetSelection([[local_cell_range[0]], [local_cell_range[1] - local_cell_range[0]]]) - in_perm = np.empty( - local_cell_range[1] - local_cell_range[0], - dtype=adios_to_numpy_dtype[perm_var.Type()], - ) - infile.Get(perm_var, in_perm, adios2.Mode.Sync) - infile.EndStep() - - # Close IO and remove io - adios.RemoveIO(io_name) + + with ADIOSFile( + adios=adios, + engine=engine, + filename=filename, + mode=adios2.Mode.Read, + io_name=io_name, + ) as adios_file: + # Find step that has cell permutation + for i in range(adios_file.file.Steps()): + adios_file.file.BeginStep() + if variable in adios_file.io.AvailableVariables().keys(): + break + adios_file.file.EndStep() + if variable not in adios_file.io.AvailableVariables().keys(): + raise KeyError(f"Variable {variable} not found in '{filename}'") + + # Get variable and get global shape + perm_var = adios_file.io.InquireVariable(variable) + shape = perm_var.Shape() + assert len(shape) == 1 + + # Get local selection + local_cell_range = compute_local_range(comm, num_cells_global) + perm_var.SetSelection([[local_cell_range[0]], [local_cell_range[1] - local_cell_range[0]]]) + in_perm = np.empty( + local_cell_range[1] - local_cell_range[0], + dtype=adios_to_numpy_dtype[perm_var.Type()], + ) + adios_file.file.Get(perm_var, in_perm, adios2.Mode.Sync) + adios_file.file.EndStep() + return in_perm @@ -128,47 +155,52 @@ def read_dofmap( # Open ADIOS engine io_name = f"{dofmap=}_reader" - io = adios.DeclareIO(io_name) - io.SetEngine(engine) - infile = io.Open(str(filename), adios2.Mode.Read) - - # First find step with dofmap offsets, to be able to read - # in a full row of the dofmap - for i in range(infile.Steps()): - infile.BeginStep() - if dofmap_offsets in io.AvailableVariables().keys(): - break - infile.EndStep() - if dofmap_offsets not in io.AvailableVariables().keys(): - raise KeyError(f"Dof offsets not found at '{dofmap_offsets}' in {filename}") - - # Get global shape of dofmap-offset, and read in data with an overlap - d_offsets = 
io.InquireVariable(dofmap_offsets)
-    shape = d_offsets.Shape()
-    assert len(shape) == 1
-    # As the offsets are one longer than the number of cells, we need to read in with an overlap
-    d_offsets.SetSelection([[local_cell_range[0]], [local_cell_range[1] + 1 - local_cell_range[0]]])
-    in_offsets = np.empty(
-        local_cell_range[1] + 1 - local_cell_range[0],
-        dtype=d_offsets.Type().strip("_t"),
-    )
-    infile.Get(d_offsets, in_offsets, adios2.Mode.Sync)
-
-    # Assuming dofmap is saved in stame step
-    # Get the relevant part of the dofmap
-    if dofmap not in io.AvailableVariables().keys():
-        raise KeyError(f"Dof offsets not found at {dofmap} in {filename}")
-    cell_dofs = io.InquireVariable(dofmap)
-    cell_dofs.SetSelection([[in_offsets[0]], [in_offsets[-1] - in_offsets[0]]])
-    in_dofmap = np.empty(in_offsets[-1] - in_offsets[0], dtype=cell_dofs.Type().strip("_t"))
-    infile.Get(cell_dofs, in_dofmap, adios2.Mode.Sync)
-
-    in_dofmap = in_dofmap.astype(np.int64)
-    in_offsets -= in_offsets[0]
-
-    infile.EndStep()
-    infile.Close()
-    adios.RemoveIO(io_name)
+
+    with ADIOSFile(
+        adios=adios,
+        engine=engine,
+        filename=filename,
+        mode=adios2.Mode.Read,
+        io_name=io_name,
+    ) as adios_file:
+        # First find step with dofmap offsets, to be able to read
+        # in a full row of the dofmap
+        for i in range(adios_file.file.Steps()):
+            adios_file.file.BeginStep()
+            if dofmap_offsets in adios_file.io.AvailableVariables().keys():
+                break
+            adios_file.file.EndStep()
+        if dofmap_offsets not in adios_file.io.AvailableVariables().keys():
+            raise KeyError(f"Dof offsets not found at '{dofmap_offsets}' in {filename}")
+
+        # Get global shape of dofmap-offset, and read in data with an overlap
+        d_offsets = adios_file.io.InquireVariable(dofmap_offsets)
+        shape = d_offsets.Shape()
+        assert len(shape) == 1
+        # As the offsets are one longer than the number of cells, we need to read in with an overlap
+        d_offsets.SetSelection(
+            [[local_cell_range[0]], [local_cell_range[1] + 1 - local_cell_range[0]]]
+        )
+        in_offsets = np.empty(
+            local_cell_range[1] + 1 - local_cell_range[0],
+            dtype=d_offsets.Type().strip("_t"),
+        )
+        adios_file.file.Get(d_offsets, in_offsets, adios2.Mode.Sync)
+
+        # Assuming dofmap is saved in same step
+        # Get the relevant part of the dofmap
+        if dofmap not in adios_file.io.AvailableVariables().keys():
+            raise KeyError(f"Dofmap not found at {dofmap} in {filename}")
+        cell_dofs = adios_file.io.InquireVariable(dofmap)
+        cell_dofs.SetSelection([[in_offsets[0]], [in_offsets[-1] - in_offsets[0]]])
+        in_dofmap = np.empty(in_offsets[-1] - in_offsets[0], dtype=cell_dofs.Type().strip("_t"))
+        adios_file.file.Get(cell_dofs, in_dofmap, adios2.Mode.Sync)
+
+        in_dofmap = in_dofmap.astype(np.int64)
+        in_offsets -= in_offsets[0]
+
+        adios_file.file.EndStep()
+
     # Return local dofmap
     return dolfinx.graph.adjacencylist(in_dofmap, in_offsets.astype(np.int32))
@@ -197,59 +229,64 @@ def read_array(
     Returns:
         Local part of array and its global starting position
     """
-    io = adios.DeclareIO("ArrayReader")
-    io.SetEngine(engine)
-    infile = io.Open(str(filename), adios2.Mode.Read)
-
-    # Get time-stamp from first available step
-    if legacy:
-        for i in range(infile.Steps()):
-            infile.BeginStep()
-            if array_name in io.AvailableVariables().keys():
-                break
-            infile.EndStep()
-        if array_name not in io.AvailableVariables().keys():
-            raise KeyError(f"No array found at {array_name}")
-    else:
-        for i in range(infile.Steps()):
-            infile.BeginStep()
-            if time_name in io.AvailableVariables().keys():
-                arr = io.InquireVariable(time_name)
-                time_shape
= arr.Shape() - arr.SetSelection([[0], [time_shape[0]]]) - times = np.empty(time_shape[0], dtype=adios_to_numpy_dtype[arr.Type()]) - infile.Get(arr, times, adios2.Mode.Sync) - if times[0] == time: + with ADIOSFile( + adios=adios, + engine=engine, + filename=filename, + mode=adios2.Mode.Read, + io_name="ArrayReader", + ) as adios_file: + # Get time-stamp from first available step + if legacy: + for i in range(adios_file.file.Steps()): + adios_file.file.BeginStep() + if array_name in adios_file.io.AvailableVariables().keys(): break - if i == infile.Steps() - 1: + adios_file.file.EndStep() + if array_name not in adios_file.io.AvailableVariables().keys(): + raise KeyError(f"No array found at {array_name}") + else: + for i in range(adios_file.file.Steps()): + adios_file.file.BeginStep() + if time_name in adios_file.io.AvailableVariables().keys(): + arr = adios_file.io.InquireVariable(time_name) + time_shape = arr.Shape() + arr.SetSelection([[0], [time_shape[0]]]) + times = np.empty(time_shape[0], dtype=adios_to_numpy_dtype[arr.Type()]) + adios_file.file.Get(arr, times, adios2.Mode.Sync) + if times[0] == time: + break + if i == adios_file.file.Steps() - 1: + raise KeyError( + f"No data associated with {time_name}={time} found in {filename}" + ) + + adios_file.file.EndStep() + + if time_name not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"No data associated with {time_name}={time} found in {filename}") - infile.EndStep() - - if time_name not in io.AvailableVariables().keys(): - raise KeyError(f"No data associated with {time_name}={time} found in {filename}") + if array_name not in adios_file.io.AvailableVariables().keys(): + raise KeyError(f"No array found at {time=} for {array_name}") - if array_name not in io.AvailableVariables().keys(): - raise KeyError(f"No array found at {time=} for {array_name}") + arr = adios_file.io.InquireVariable(array_name) + arr_shape = arr.Shape() + assert len(arr_shape) >= 1 # TODO: Should we always pick the first element? + arr_range = compute_local_range(comm, arr_shape[0]) - arr = io.InquireVariable(array_name) - arr_shape = arr.Shape() - assert len(arr_shape) >= 1 # TODO: Should we always pick the first element? 
- arr_range = compute_local_range(comm, arr_shape[0]) + if len(arr_shape) == 1: + arr.SetSelection([[arr_range[0]], [arr_range[1] - arr_range[0]]]) + vals = np.empty(arr_range[1] - arr_range[0], dtype=adios_to_numpy_dtype[arr.Type()]) + else: + arr.SetSelection([[arr_range[0], 0], [arr_range[1] - arr_range[0], arr_shape[1]]]) + vals = np.empty( + (arr_range[1] - arr_range[0], arr_shape[1]), + dtype=adios_to_numpy_dtype[arr.Type()], + ) + assert arr_shape[1] == 1 - if len(arr_shape) == 1: - arr.SetSelection([[arr_range[0]], [arr_range[1] - arr_range[0]]]) - vals = np.empty(arr_range[1] - arr_range[0], dtype=adios_to_numpy_dtype[arr.Type()]) - else: - arr.SetSelection([[arr_range[0], 0], [arr_range[1] - arr_range[0], arr_shape[1]]]) - vals = np.empty( - (arr_range[1] - arr_range[0], arr_shape[1]), - dtype=adios_to_numpy_dtype[arr.Type()], - ) - assert arr_shape[1] == 1 + adios_file.file.Get(arr, vals, adios2.Mode.Sync) + adios_file.file.EndStep() - infile.Get(arr, vals, adios2.Mode.Sync) - infile.EndStep() - infile.Close() - adios.RemoveIO("ArrayReader") return vals.reshape(-1), arr_range[0] diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py index db16c0e..8c44082 100644 --- a/src/adios4dolfinx/checkpointing.py +++ b/src/adios4dolfinx/checkpointing.py @@ -17,6 +17,7 @@ import ufl from .adios2_helpers import ( + ADIOSFile, adios_to_numpy_dtype, read_array, read_cell_perms, @@ -84,35 +85,40 @@ def write_meshtags( indices = mesh.geometry.index_map().local_to_global(entities_to_geometry.reshape(-1)) adios = adios2.ADIOS(mesh.comm) - io = adios.DeclareIO("MeshTagWriter") - io.SetEngine(engine) - outfile = io.Open(str(filename), adios2.Mode.Append) - # Write meshtag topology - topology_var = io.DefineVariable( - meshtags.name + "_topology", - indices, - shape=[global_num_tag_entities, num_dofs_per_entity], - start=[local_start, 0], - count=[num_saved_tag_entities, num_dofs_per_entity], - ) - outfile.Put(topology_var, indices, adios2.Mode.Sync) - - # Write meshtag topology - values_var = io.DefineVariable( - meshtags.name + "_values", - local_values, - shape=[global_num_tag_entities], - start=[local_start], - count=[num_saved_tag_entities], - ) - outfile.Put(values_var, local_values, adios2.Mode.Sync) + with ADIOSFile( + adios=adios, + filename=filename, + mode=adios2.Mode.Append, + engine=engine, + io_name="MeshTagWriter", + ) as adios_file: + # Write meshtag topology + topology_var = adios_file.io.DefineVariable( + meshtags.name + "_topology", + indices, + shape=[global_num_tag_entities, num_dofs_per_entity], + start=[local_start, 0], + count=[num_saved_tag_entities, num_dofs_per_entity], + ) + adios_file.file.Put(topology_var, indices, adios2.Mode.Sync) + + # Write meshtag topology + values_var = adios_file.io.DefineVariable( + meshtags.name + "_values", + local_values, + shape=[global_num_tag_entities], + start=[local_start], + count=[num_saved_tag_entities], + ) + adios_file.file.Put(values_var, local_values, adios2.Mode.Sync) - # Write meshtag dim - io.DefineAttribute(meshtags.name + "_dim", np.array([meshtags.dim], dtype=np.uint8)) + # Write meshtag dim + adios_file.io.DefineAttribute( + meshtags.name + "_dim", np.array([meshtags.dim], dtype=np.uint8) + ) - outfile.PerformPuts() - outfile.EndStep() - outfile.Close() + adios_file.file.PerformPuts() + adios_file.file.EndStep() def read_meshtags( @@ -133,62 +139,68 @@ def read_meshtags( The meshtags """ adios = adios2.ADIOS(mesh.comm) - io = adios.DeclareIO("MeshTagsReader") - io.SetEngine(engine) - infile = 
io.Open(str(filename), adios2.Mode.Read)
-
-    # Get mesh cell type
-    dim_attr_name = f"{meshtag_name}_dim"
-    step = 0
-    for i in range(infile.Steps()):
-        infile.BeginStep()
-        if dim_attr_name in io.AvailableAttributes().keys():
-            step = i
-            break
-        infile.EndStep()
-    if dim_attr_name not in io.AvailableAttributes().keys():
-        raise KeyError(f"{dim_attr_name} not found in {filename}")
-
-    m_dim = io.InquireAttribute(dim_attr_name)
-    dim = int(m_dim.Data()[0])
-
-    # Get mesh tags entites
-    topology_name = f"{meshtag_name}_topology"
-    for i in range(step, infile.Steps()):
-        if i > step:
-            infile.BeginStep()
-        if topology_name in io.AvailableVariables().keys():
-            break
-        infile.EndStep()
-    if topology_name not in io.AvailableVariables().keys():
-        raise KeyError(f"{topology_name} not found in {filename}")
-
-    topology = io.InquireVariable(topology_name)
-    top_shape = topology.Shape()
-    topology_range = compute_local_range(mesh.comm, top_shape[0])
-
-    topology.SetSelection(
-        [[topology_range[0], 0], [topology_range[1] - topology_range[0], top_shape[1]]]
-    )
-    mesh_entities = np.empty((topology_range[1] - topology_range[0], top_shape[1]), dtype=np.int64)
-    infile.Get(topology, mesh_entities, adios2.Mode.Deferred)
-
-    # Get mesh tags values
-    values_name = f"{meshtag_name}_values"
-    if values_name not in io.AvailableVariables().keys():
-        raise KeyError(f"{values_name} not found")
-
-    values = io.InquireVariable(values_name)
-    val_shape = values.Shape()
-    assert val_shape[0] == top_shape[0]
-    values.SetSelection([[topology_range[0]], [topology_range[1] - topology_range[0]]])
-    tag_values = np.empty((topology_range[1] - topology_range[0]), dtype=np.int32)
-    infile.Get(values, tag_values, adios2.Mode.Deferred)
-
-    infile.PerformGets()
-    infile.EndStep()
-    infile.Close()
-    assert adios.RemoveIO("MeshTagsReader")
+    with ADIOSFile(
+        adios=adios,
+        filename=filename,
+        mode=adios2.Mode.Read,
+        engine=engine,
+        io_name="MeshTagsReader",
+    ) as adios_file:
+        # Get meshtag dimension
+        dim_attr_name = f"{meshtag_name}_dim"
+        step = 0
+        for i in range(adios_file.file.Steps()):
+            adios_file.file.BeginStep()
+            if dim_attr_name in adios_file.io.AvailableAttributes().keys():
+                step = i
+                break
+            adios_file.file.EndStep()
+        if dim_attr_name not in adios_file.io.AvailableAttributes().keys():
+            raise KeyError(f"{dim_attr_name} not found in {filename}")
+
+        m_dim = adios_file.io.InquireAttribute(dim_attr_name)
+        dim = int(m_dim.Data()[0])
+
+        # Get mesh tags entities
+        topology_name = f"{meshtag_name}_topology"
+        for i in range(step, adios_file.file.Steps()):
+            if i > step:
+                adios_file.file.BeginStep()
+            if topology_name in adios_file.io.AvailableVariables().keys():
+                break
+            adios_file.file.EndStep()
+        if topology_name not in adios_file.io.AvailableVariables().keys():
+            raise KeyError(f"{topology_name} not found in {filename}")
+
+        topology = adios_file.io.InquireVariable(topology_name)
+        top_shape = topology.Shape()
+        topology_range = compute_local_range(mesh.comm, top_shape[0])
+
+        topology.SetSelection(
+            [
+                [topology_range[0], 0],
+                [topology_range[1] - topology_range[0], top_shape[1]],
+            ]
+        )
+        mesh_entities = np.empty(
+            (topology_range[1] - topology_range[0], top_shape[1]), dtype=np.int64
+        )
+        adios_file.file.Get(topology, mesh_entities, adios2.Mode.Deferred)
+
+        # Get mesh tags values
+        values_name = f"{meshtag_name}_values"
+        if values_name not in adios_file.io.AvailableVariables().keys():
+            raise KeyError(f"{values_name} not found")
+
+        values = adios_file.io.InquireVariable(values_name)
+        val_shape = 
values.Shape() + assert val_shape[0] == top_shape[0] + values.SetSelection([[topology_range[0]], [topology_range[1] - topology_range[0]]]) + tag_values = np.empty((topology_range[1] - topology_range[0]), dtype=np.int32) + adios_file.file.Get(values, tag_values, adios2.Mode.Deferred) + + adios_file.file.PerformGets() + adios_file.file.EndStep() local_entities, local_values = dolfinx.cpp.io.distribute_entity_data( mesh._cpp_object, int(dim), mesh_entities, tag_values @@ -347,52 +359,59 @@ def read_mesh( The distributed mesh """ adios = adios2.ADIOS(comm) - io = adios.DeclareIO("MeshReader") - io.SetEngine(engine) - infile = io.Open(str(filename), adios2.Mode.Read) - infile.BeginStep() - - # Get mesh cell type - if "CellType" not in io.AvailableAttributes().keys(): - raise KeyError(f"Mesh cell type not found at CellType in {filename}") - celltype = io.InquireAttribute("CellType") - cell_type = celltype.DataString()[0] - - # Get basix info - if "LagrangeVariant" not in io.AvailableAttributes().keys(): - raise KeyError(f"Mesh LagrangeVariant not found in {filename}") - lvar = io.InquireAttribute("LagrangeVariant").Data()[0] - if "Degree" not in io.AvailableAttributes().keys(): - raise KeyError(f"Mesh degree not found in {filename}") - degree = io.InquireAttribute("Degree").Data()[0] - - # Get mesh geometry - if "Points" not in io.AvailableVariables().keys(): - raise KeyError(f"Mesh coordinates not found at Points in {filename}") - geometry = io.InquireVariable("Points") - x_shape = geometry.Shape() - geometry_range = compute_local_range(comm, x_shape[0]) - geometry.SetSelection( - [[geometry_range[0], 0], [geometry_range[1] - geometry_range[0], x_shape[1]]] - ) - mesh_geometry = np.empty( - (geometry_range[1] - geometry_range[0], x_shape[1]), - dtype=adios_to_numpy_dtype[geometry.Type()], - ) - infile.Get(geometry, mesh_geometry, adios2.Mode.Deferred) - # Get mesh topology (distributed) - if "Topology" not in io.AvailableVariables().keys(): - raise KeyError(f"Mesh topology not found at Topology in {filename}") - topology = io.InquireVariable("Topology") - shape = topology.Shape() - local_range = compute_local_range(comm, shape[0]) - topology.SetSelection([[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]]) - mesh_topology = np.empty((local_range[1] - local_range[0], shape[1]), dtype=np.int64) - infile.Get(topology, mesh_topology, adios2.Mode.Deferred) - - infile.PerformGets() - infile.EndStep() - assert adios.RemoveIO("MeshReader") + + with ADIOSFile( + adios=adios, + filename=filename, + mode=adios2.Mode.Read, + engine=engine, + io_name="MeshReader", + ) as adios_file: + adios_file.file.BeginStep() + + # Get mesh cell type + if "CellType" not in adios_file.io.AvailableAttributes().keys(): + raise KeyError(f"Mesh cell type not found at CellType in {filename}") + celltype = adios_file.io.InquireAttribute("CellType") + cell_type = celltype.DataString()[0] + + # Get basix info + if "LagrangeVariant" not in adios_file.io.AvailableAttributes().keys(): + raise KeyError(f"Mesh LagrangeVariant not found in {filename}") + lvar = adios_file.io.InquireAttribute("LagrangeVariant").Data()[0] + if "Degree" not in adios_file.io.AvailableAttributes().keys(): + raise KeyError(f"Mesh degree not found in {filename}") + degree = adios_file.io.InquireAttribute("Degree").Data()[0] + + # Get mesh geometry + if "Points" not in adios_file.io.AvailableVariables().keys(): + raise KeyError(f"Mesh coordinates not found at Points in {filename}") + geometry = adios_file.io.InquireVariable("Points") + x_shape = 
geometry.Shape() + geometry_range = compute_local_range(comm, x_shape[0]) + geometry.SetSelection( + [ + [geometry_range[0], 0], + [geometry_range[1] - geometry_range[0], x_shape[1]], + ] + ) + mesh_geometry = np.empty( + (geometry_range[1] - geometry_range[0], x_shape[1]), + dtype=adios_to_numpy_dtype[geometry.Type()], + ) + adios_file.file.Get(geometry, mesh_geometry, adios2.Mode.Deferred) + # Get mesh topology (distributed) + if "Topology" not in adios_file.io.AvailableVariables().keys(): + raise KeyError(f"Mesh topology not found at Topology in {filename}") + topology = adios_file.io.InquireVariable("Topology") + shape = topology.Shape() + local_range = compute_local_range(comm, shape[0]) + topology.SetSelection([[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]]) + mesh_topology = np.empty((local_range[1] - local_range[0], shape[1]), dtype=np.int64) + adios_file.file.Get(topology, mesh_topology, adios2.Mode.Deferred) + + adios_file.file.PerformGets() + adios_file.file.EndStep() # Create DOLFINx mesh element = basix.ufl.element( diff --git a/src/adios4dolfinx/legacy_readers.py b/src/adios4dolfinx/legacy_readers.py index b4441d6..6df3bfe 100644 --- a/src/adios4dolfinx/legacy_readers.py +++ b/src/adios4dolfinx/legacy_readers.py @@ -17,9 +17,19 @@ import numpy.typing as npt import ufl -from .adios2_helpers import adios_to_numpy_dtype, read_array, resolve_adios_scope +from .adios2_helpers import ( + ADIOSFile, + adios_to_numpy_dtype, + read_array, + resolve_adios_scope, +) from .comm_helpers import send_dofs_and_recv_values -from .utils import compute_dofmap_pos, compute_insert_position, compute_local_range, index_owner +from .utils import ( + compute_dofmap_pos, + compute_insert_position, + compute_local_range, + index_owner, +) adios2 = resolve_adios_scope(adios2) @@ -65,83 +75,88 @@ def read_dofmap_legacy( # Open ADIOS engine adios = adios2.ADIOS(comm) - io = adios.DeclareIO("DofmapReader") - io.SetEngine(engine) - infile = io.Open(str(filename), adios2.Mode.Read) + with ADIOSFile( + adios=adios, + filename=filename, + mode=adios2.Mode.Read, + engine=engine, + io_name="DofmapReader", + ) as adios_file: + for i in range(adios_file.file.Steps()): + adios_file.file.BeginStep() + if dofmap_offsets in adios_file.io.AvailableVariables().keys(): + break + adios_file.file.EndStep() + + d_offsets = adios_file.io.InquireVariable(dofmap_offsets) + shape = d_offsets.Shape() + + # As the offsets are one longer than the number of cells, we need to read in with an overlap + if len(shape) == 1: + d_offsets.SetSelection( + [[local_cell_range[0]], [local_cell_range[1] + 1 - local_cell_range[0]]] + ) + in_offsets = np.empty( + local_cell_range[1] + 1 - local_cell_range[0], + dtype=d_offsets.Type().strip("_t"), + ) + else: + d_offsets.SetSelection( + [ + [local_cell_range[0], 0], + [local_cell_range[1] + 1 - local_cell_range[0], shape[1]], + ] + ) + in_offsets = np.empty( + (local_cell_range[1] + 1 - local_cell_range[0], shape[1]), + dtype=d_offsets.Type().strip("_t"), + ) + + in_offsets = in_offsets.squeeze() + adios_file.file.Get(d_offsets, in_offsets, adios2.Mode.Sync) + # Get the relevant part of the dofmap + if dofmap not in adios_file.io.AvailableVariables().keys(): + raise KeyError(f"Dof offsets not found at {dofmap}") + cell_dofs = adios_file.io.InquireVariable(dofmap) + + if len(shape) == 1: + cell_dofs.SetSelection([[in_offsets[0]], [in_offsets[-1] - in_offsets[0]]]) + in_dofmap = np.empty(in_offsets[-1] - in_offsets[0], dtype=cell_dofs.Type().strip("_t")) + else: + 
cell_dofs.SetSelection([[in_offsets[0], 0], [in_offsets[-1] - in_offsets[0], shape[1]]]) + in_dofmap = np.empty( + (in_offsets[-1] - in_offsets[0], shape[1]), + dtype=cell_dofs.Type().strip("_t"), + ) + assert shape[1] == 1 + + adios_file.file.Get(cell_dofs, in_dofmap, adios2.Mode.Sync) + + in_dofmap = in_dofmap.reshape(-1).astype(np.int64) + + # Map xxxyyyzzz to xyzxyz + mapped_dofmap = np.empty_like(in_dofmap) + for i in range(len(in_offsets) - 1): + pos_begin, pos_end = ( + in_offsets[i] - in_offsets[0], + in_offsets[i + 1] - in_offsets[0], + ) + dofs_i = in_dofmap[pos_begin:pos_end] + assert (pos_end - pos_begin) % bs == 0 + num_dofs_local = int((pos_end - pos_begin) // bs) + for k in range(bs): + for j in range(num_dofs_local): + mapped_dofmap[int(pos_begin + j * bs + k)] = dofs_i[int(num_dofs_local * k + j)] + + # Extract dofmap data + global_dofs = np.zeros_like(cells, dtype=np.int64) + input_cell_positions = cells - local_cell_range[0] + read_pos = in_offsets[input_cell_positions].astype(np.int32) + dof_pos - in_offsets[0] + global_dofs = mapped_dofmap[read_pos] + del input_cell_positions, read_pos + + adios_file.file.EndStep() - for i in range(infile.Steps()): - infile.BeginStep() - if dofmap_offsets in io.AvailableVariables().keys(): - break - infile.EndStep() - - d_offsets = io.InquireVariable(dofmap_offsets) - shape = d_offsets.Shape() - - # As the offsets are one longer than the number of cells, we need to read in with an overlap - if len(shape) == 1: - d_offsets.SetSelection( - [[local_cell_range[0]], [local_cell_range[1] + 1 - local_cell_range[0]]] - ) - in_offsets = np.empty( - local_cell_range[1] + 1 - local_cell_range[0], - dtype=d_offsets.Type().strip("_t"), - ) - else: - d_offsets.SetSelection( - [ - [local_cell_range[0], 0], - [local_cell_range[1] + 1 - local_cell_range[0], shape[1]], - ] - ) - in_offsets = np.empty( - (local_cell_range[1] + 1 - local_cell_range[0], shape[1]), - dtype=d_offsets.Type().strip("_t"), - ) - - in_offsets = in_offsets.squeeze() - infile.Get(d_offsets, in_offsets, adios2.Mode.Sync) - # Get the relevant part of the dofmap - if dofmap not in io.AvailableVariables().keys(): - raise KeyError(f"Dof offsets not found at {dofmap}") - cell_dofs = io.InquireVariable(dofmap) - - if len(shape) == 1: - cell_dofs.SetSelection([[in_offsets[0]], [in_offsets[-1] - in_offsets[0]]]) - in_dofmap = np.empty(in_offsets[-1] - in_offsets[0], dtype=cell_dofs.Type().strip("_t")) - else: - cell_dofs.SetSelection([[in_offsets[0], 0], [in_offsets[-1] - in_offsets[0], shape[1]]]) - in_dofmap = np.empty( - (in_offsets[-1] - in_offsets[0], shape[1]), - dtype=cell_dofs.Type().strip("_t"), - ) - assert shape[1] == 1 - - infile.Get(cell_dofs, in_dofmap, adios2.Mode.Sync) - - in_dofmap = in_dofmap.reshape(-1).astype(np.int64) - - # Map xxxyyyzzz to xyzxyz - num_dofs = in_offsets[-1] - in_offsets[0] - assert num_dofs == len(in_dofmap) and (num_dofs) % bs == 0 - num_dofs_per_cell = in_offsets[1:] - in_offsets[:-1] - assert np.allclose( - num_dofs_per_cell, num_dofs_per_cell[0] - ), "Non-uniform number of dofs per cell" - num_cells = len(in_offsets) - 1 - nd_dofmap = in_dofmap.reshape(num_cells, bs, int(num_dofs_per_cell[0] // bs)) - mapped_dofmap = np.swapaxes(nd_dofmap, 1, 2).reshape(-1) - - # Extract dofmap data - global_dofs = np.zeros_like(cells, dtype=np.int64) - input_cell_positions = cells - local_cell_range[0] - read_pos = in_offsets[input_cell_positions].astype(np.int32) + dof_pos - in_offsets[0] - global_dofs = mapped_dofmap[read_pos] - del input_cell_positions, 
read_pos
-
-    infile.EndStep()
-    infile.Close()
-    adios.RemoveIO("DofmapReader")
     return global_dofs
 
 
@@ -263,40 +278,39 @@ def read_mesh_from_legacy_h5(
     """
     # Create ADIOS2 reader
     adios = adios2.ADIOS(comm)
-    io = adios.DeclareIO("Mesh reader")
-
-    io.SetEngine("HDF5")
-
-    # Make sure we use the HDF5File and check that the file is present
-    filename = pathlib.Path(filename).with_suffix(".h5")
-    if not filename.is_file():
-        raise FileNotFoundError(f"File {filename} does not exist")
-
-    # Open ADIOS2 Reader
-    infile = io.Open(str(filename), adios2.Mode.Read)
-    # Get mesh topology (distributed)
-    if f"{group}/topology" not in io.AvailableVariables().keys():
-        raise KeyError(f"Mesh topology not found at '{group}/topology'")
-    topology = io.InquireVariable(f"{group}/topology")
-    shape = topology.Shape()
-    local_range = compute_local_range(MPI.COMM_WORLD, shape[0])
-    topology.SetSelection([[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]])
-
-    mesh_topology = np.empty(
-        (local_range[1] - local_range[0], shape[1]), dtype=topology.Type().strip("_t")
-    )
-    infile.Get(topology, mesh_topology, adios2.Mode.Sync)
-
-    # Get mesh cell type
-    if f"{group}/topology/celltype" in io.AvailableAttributes().keys():
-        celltype = io.InquireAttribute(f"{group}/topology/celltype")
-        cell_type = celltype.DataString()[0]
+    # Make sure we use the HDF5File and check that the file is present
+    # (the suffix must be fixed before the file is opened below)
+    filename = pathlib.Path(filename).with_suffix(".h5")
+    if not filename.is_file():
+        raise FileNotFoundError(f"File {filename} does not exist")
+
+    with ADIOSFile(
+        adios=adios,
+        filename=filename,
+        mode=adios2.Mode.Read,
+        io_name="Mesh reader",
+        engine="HDF5",
+    ) as adios_file:
+        # Get mesh topology (distributed)
+        if f"{group}/topology" not in adios_file.io.AvailableVariables().keys():
+            raise KeyError(f"Mesh topology not found at '{group}/topology'")
+        topology = adios_file.io.InquireVariable(f"{group}/topology")
+        shape = topology.Shape()
+        local_range = compute_local_range(MPI.COMM_WORLD, shape[0])
+        topology.SetSelection([[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]])
+
+        mesh_topology = np.empty(
+            (local_range[1] - local_range[0], shape[1]),
+            dtype=topology.Type().strip("_t"),
+        )
+        adios_file.file.Get(topology, mesh_topology, adios2.Mode.Sync)
 
-    # Get mesh geometry
-    mesh_geometry = read_mesh_geometry(io=io, infile=infile, group=group)
+        # Get mesh cell type
+        if f"{group}/topology/celltype" in adios_file.io.AvailableAttributes().keys():
+            celltype = adios_file.io.InquireAttribute(f"{group}/topology/celltype")
+            cell_type = celltype.DataString()[0]
 
-    infile.Close()
-    assert adios.RemoveIO("Mesh reader")
+        # Get mesh geometry
+        mesh_geometry = read_mesh_geometry(io=adios_file.io, infile=adios_file.file, group=group)
 
     # Create DOLFINx mesh
     element = basix.ufl.element(
diff --git a/src/adios4dolfinx/snapshot.py b/src/adios4dolfinx/snapshot.py
index c797377..c1e0577 100644
--- a/src/adios4dolfinx/snapshot.py
+++ b/src/adios4dolfinx/snapshot.py
@@ -9,7 +9,7 @@
 import adios2
 import dolfinx
 
-from .adios2_helpers import resolve_adios_scope
+from .adios2_helpers import ADIOSFile, resolve_adios_scope
 
 adios2 = resolve_adios_scope(adios2)
 
@@ -27,31 +27,32 @@ def snapshot_checkpoint(uh: dolfinx.fem.Function, file: Path, mode: adios2.Mode)
     :param file: The file to write to or read from
     :param mode: Either read or write
     """
-    # Create ADIOS IO
-    adios = adios2.ADIOS(uh.function_space.mesh.comm)
-    io_name = "SnapshotCheckPoint"
-    io = adios.DeclareIO(io_name)
-    io.SetEngine("BP4")
+    if mode not in 
[adios2.Mode.Write, adios2.Mode.Read]:
+        raise ValueError(f"Got invalid mode {mode}")
-    adios_file = io.Open(str(file), mode)
-
-    if mode == adios2.Mode.Write:
-        dofmap = uh.function_space.dofmap
-        num_dofs_local = dofmap.index_map.size_local * dofmap.index_map_bs
-        local_dofs = uh.x.array[:num_dofs_local].copy()
-
-        # Write to file
-        adios_file.BeginStep()
-        dofs = io.DefineVariable("dofs", local_dofs, count=[num_dofs_local])
-        adios_file.Put(dofs, local_dofs, adios2.Mode.Sync)
-        adios_file.EndStep()
-    else:
-        adios_file.BeginStep()
-        in_variable = io.InquireVariable("dofs")
-        in_variable.SetBlockSelection(uh.function_space.mesh.comm.rank)
-        adios_file.Get(in_variable, uh.x.array, adios2.Mode.Sync)
-        adios_file.EndStep()
-        uh.x.scatter_forward()
-    adios_file.Close()
-    adios.RemoveIO(io_name)
+    # Create ADIOS IO
+    adios = adios2.ADIOS(uh.function_space.mesh.comm)
+    with ADIOSFile(
+        adios=adios,
+        filename=file,
+        mode=mode,
+        io_name="SnapshotCheckPoint",
+        engine="BP4",
+    ) as adios_file:
+        if mode == adios2.Mode.Write:
+            dofmap = uh.function_space.dofmap
+            num_dofs_local = dofmap.index_map.size_local * dofmap.index_map_bs
+            local_dofs = uh.x.array[:num_dofs_local].copy()
+
+            # Write to file
+            adios_file.file.BeginStep()
+            dofs = adios_file.io.DefineVariable("dofs", local_dofs, count=[num_dofs_local])
+            adios_file.file.Put(dofs, local_dofs, adios2.Mode.Sync)
+            adios_file.file.EndStep()
+        else:
+            adios_file.file.BeginStep()
+            in_variable = adios_file.io.InquireVariable("dofs")
+            in_variable.SetBlockSelection(uh.function_space.mesh.comm.rank)
+            adios_file.file.Get(in_variable, uh.x.array, adios2.Mode.Sync)
+            adios_file.file.EndStep()
+            uh.x.scatter_forward()
diff --git a/src/adios4dolfinx/writers.py b/src/adios4dolfinx/writers.py
index 8f5ac01..5f9ff48 100644
--- a/src/adios4dolfinx/writers.py
+++ b/src/adios4dolfinx/writers.py
@@ -12,7 +12,7 @@
 import adios2
 import numpy as np
 
-from .adios2_helpers import resolve_adios_scope
+from .adios2_helpers import ADIOSFile, resolve_adios_scope
 from .structures import FunctionData, MeshData
 
 adios2 = resolve_adios_scope(adios2)
@@ -40,46 +40,44 @@ def write_mesh(
     gdim = mesh.local_geometry.shape[1]
     adios = adios2.ADIOS(comm)
-    # TODO: add context manager here?
- io = adios.DeclareIO(io_name) - io.SetEngine(engine) - outfile = io.Open(str(filename), mode) - - # Write geometry - pointvar = io.DefineVariable( - "Points", - mesh.local_geometry, - shape=[mesh.num_nodes_global, gdim], - start=[mesh.local_geometry_pos[0], 0], - count=[mesh.local_geometry_pos[1] - mesh.local_geometry_pos[0], gdim], - ) - outfile.Put(pointvar, mesh.local_geometry, adios2.Mode.Sync) - - # Write celltype - io.DefineAttribute("CellType", mesh.cell_type) - - # Write basix properties - io.DefineAttribute("Degree", np.array([mesh.degree], dtype=np.int32)) - io.DefineAttribute("LagrangeVariant", np.array([mesh.lagrange_variant], dtype=np.int32)) - - # Write topology - num_dofs_per_cell = mesh.local_topology.shape[1] - dvar = io.DefineVariable( - "Topology", - mesh.local_topology, - shape=[mesh.num_cells_global, num_dofs_per_cell], - start=[mesh.local_topology_pos[0], 0], - count=[ - mesh.local_topology_pos[1] - mesh.local_topology_pos[0], - num_dofs_per_cell, - ], - ) - - outfile.Put(dvar, mesh.local_topology) - outfile.PerformPuts() - outfile.EndStep() - outfile.Close() - assert adios.RemoveIO(io_name) + with ADIOSFile( + adios=adios, filename=filename, mode=mode, engine=engine, io_name=io_name + ) as adios_file: + # Write geometry + pointvar = adios_file.io.DefineVariable( + "Points", + mesh.local_geometry, + shape=[mesh.num_nodes_global, gdim], + start=[mesh.local_geometry_pos[0], 0], + count=[mesh.local_geometry_pos[1] - mesh.local_geometry_pos[0], gdim], + ) + adios_file.file.Put(pointvar, mesh.local_geometry, adios2.Mode.Sync) + + # Write celltype + adios_file.io.DefineAttribute("CellType", mesh.cell_type) + + # Write basix properties + adios_file.io.DefineAttribute("Degree", np.array([mesh.degree], dtype=np.int32)) + adios_file.io.DefineAttribute( + "LagrangeVariant", np.array([mesh.lagrange_variant], dtype=np.int32) + ) + + # Write topology + num_dofs_per_cell = mesh.local_topology.shape[1] + dvar = adios_file.io.DefineVariable( + "Topology", + mesh.local_topology, + shape=[mesh.num_cells_global, num_dofs_per_cell], + start=[mesh.local_topology_pos[0], 0], + count=[ + mesh.local_topology_pos[1] - mesh.local_topology_pos[0], + num_dofs_per_cell, + ], + ) + + adios_file.file.Put(dvar, mesh.local_topology) + adios_file.file.PerformPuts() + adios_file.file.EndStep() def write_function( @@ -104,58 +102,55 @@ def write_function( io_name: Internal name used for the ADIOS IO object """ adios = adios2.ADIOS(comm) - # TODO: add context manager here? 
- io = adios.DeclareIO(io_name) - io.SetEngine(engine) - outfile = io.Open(str(filename), mode) - - # Add mesh permutations - pvar = io.DefineVariable( - "CellPermutations", - u.cell_permutations, - shape=[u.num_cells_global], - start=[u.local_cell_range[0]], - count=[u.local_cell_range[1] - u.local_cell_range[0]], - ) - outfile.Put(pvar, u.cell_permutations) - dofmap_var = io.DefineVariable( - f"{u.name}_dofmap", - u.dofmap_array, - shape=[u.global_dofs_in_dofmap], - start=[u.dofmap_range[0]], - count=[u.dofmap_range[1] - u.dofmap_range[0]], - ) - outfile.Put(dofmap_var, u.dofmap_array) - - xdofmap_var = io.DefineVariable( - f"{u.name}_XDofmap", - u.dofmap_offsets, - shape=[u.num_cells_global + 1], - start=[u.local_cell_range[0]], - count=[u.local_cell_range[1] - u.local_cell_range[0] + 1], - ) - outfile.Put(xdofmap_var, u.dofmap_offsets) - - val_var = io.DefineVariable( - f"{u.name}_values", - u.values, - shape=[u.num_dofs_global], - start=[u.dof_range[0]], - count=[u.dof_range[1] - u.dof_range[0]], - ) - outfile.Put(val_var, u.values) - - # Add time step to file - t_arr = np.array([time], dtype=np.float64) - time_var = io.DefineVariable( - f"{u.name}_time", - t_arr, - shape=[1], - start=[0], - count=[1 if comm.rank == 0 else 0], - ) - outfile.Put(time_var, t_arr) - outfile.PerformPuts() - outfile.EndStep() - outfile.Close() - assert adios.RemoveIO(io_name) + + with ADIOSFile( + adios=adios, filename=filename, mode=mode, engine=engine, io_name=io_name + ) as adios_file: + # Add mesh permutations + pvar = adios_file.io.DefineVariable( + "CellPermutations", + u.cell_permutations, + shape=[u.num_cells_global], + start=[u.local_cell_range[0]], + count=[u.local_cell_range[1] - u.local_cell_range[0]], + ) + adios_file.file.Put(pvar, u.cell_permutations) + dofmap_var = adios_file.io.DefineVariable( + f"{u.name}_dofmap", + u.dofmap_array, + shape=[u.global_dofs_in_dofmap], + start=[u.dofmap_range[0]], + count=[u.dofmap_range[1] - u.dofmap_range[0]], + ) + adios_file.file.Put(dofmap_var, u.dofmap_array) + + xdofmap_var = adios_file.io.DefineVariable( + f"{u.name}_XDofmap", + u.dofmap_offsets, + shape=[u.num_cells_global + 1], + start=[u.local_cell_range[0]], + count=[u.local_cell_range[1] - u.local_cell_range[0] + 1], + ) + adios_file.file.Put(xdofmap_var, u.dofmap_offsets) + + val_var = adios_file.io.DefineVariable( + f"{u.name}_values", + u.values, + shape=[u.num_dofs_global], + start=[u.dof_range[0]], + count=[u.dof_range[1] - u.dof_range[0]], + ) + adios_file.file.Put(val_var, u.values) + + # Add time step to file + t_arr = np.array([time], dtype=np.float64) + time_var = adios_file.io.DefineVariable( + f"{u.name}_time", + t_arr, + shape=[1], + start=[0], + count=[1 if comm.rank == 0 else 0], + ) + adios_file.file.Put(time_var, t_arr) + adios_file.file.PerformPuts() + adios_file.file.EndStep() From 9e1e98376c9b35d3a439f9fa2415d2b472ecfc60 Mon Sep 17 00:00:00 2001 From: Henrik Finsberg Date: Sat, 2 Mar 2024 20:44:07 +0100 Subject: [PATCH 15/49] Add pre-commit hooks (just to not drive Henrik crazy) (#75) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Jørgen Schartum Dokken --- .pre-commit-config.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..e378ec7 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,17 @@ +# See https://pre-commit.com for more information +# See 
https://pre-commit.com/hooks.html for more hooks +repos: + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: 'v0.2.2' + hooks: + # Run the linter. + - id: ruff + args: [ --fix ] + # Run the formatter. + - id: ruff-format + + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.8.0 + hooks: + - id: mypy From aad9112116e1faea098f3f24fb1a4a577a0ef236 Mon Sep 17 00:00:00 2001 From: Henrik Finsberg Date: Sat, 2 Mar 2024 21:06:10 +0100 Subject: [PATCH 16/49] Make it possible to pass name when reading function (#76) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Make it possible to pass name when reading function * Make it possible to pass name when writing function * Update docstring * Ruff formatting --------- Co-authored-by: Jørgen Schartum Dokken Co-authored-by: jorgensd --- src/adios4dolfinx/checkpointing.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py index 8c44082..8d7bd3f 100644 --- a/src/adios4dolfinx/checkpointing.py +++ b/src/adios4dolfinx/checkpointing.py @@ -224,6 +224,7 @@ def read_function( engine: str = "BP4", time: float = 0.0, legacy: bool = False, + name: str | None = None, ): """ Read checkpoint from file and fill it into `u`. @@ -232,12 +233,15 @@ def read_function( filename: Path to checkpoint u: Function to fill engine: ADIOS engine type used for reading + time: Time-stamp associated with checkpoint legacy: If checkpoint is from prior to time-dependent writing set to True + name: If not provided, `u.name` is used to search through the input file for the function """ mesh = u.function_space.mesh comm = mesh.comm adios = adios2.ADIOS(comm) - name = u.name + if name is None: + name = u.name # ----------------------Step 1--------------------------------- # Compute index of input cells and get cell permutation num_owned_cells = mesh.topology.index_map(mesh.topology.dim).size_local @@ -487,6 +491,7 @@ def write_function( engine: str = "BP4", mode: adios2.Mode = adios2.Mode.Append, time: float = 0.0, + name: str | None = None, ): """ Write function checkpoint to file. @@ -497,6 +502,7 @@ def write_function( engine: ADIOS2 engine mode: Write or append. time: Time-stamp for simulation + name: Name of function to write. If None, the name of the function is used. 
""" dofmap = u.function_space.dofmap values = u.x.array @@ -546,7 +552,7 @@ def write_function( values=values[:num_dofs_local].copy(), dof_range=local_dof_range, num_dofs_global=num_dofs_global, - name=u.name, + name=name or u.name, ) # Write to file fname = Path(filename) From 62371a634acb1b7da10d560210f9a14c48198e61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Sat, 2 Mar 2024 21:38:28 +0100 Subject: [PATCH 17/49] Remove old paths and update CI name (#77) --- .github/workflows/build_docs.yml | 7 +------ .github/workflows/check_formatting.yml | 1 - .github/workflows/create_legacy_data.yml | 2 +- .github/workflows/test_package.yml | 1 - 4 files changed, 2 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index 432e477..dde4acf 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -1,18 +1,14 @@ -# Simple workflow for deploying static content to GitHub Pages -name: Deploy static content to Pages +name: Build documentation on: pull_request: - # The CI is executed on every pull request to the main branch branches: - main - # Allows you to run this workflow manually from the Actions tab workflow_dispatch: workflow_call: env: - # Directory that will be published on github pages PUBLISH_DIR: ./_build/html DEB_PYTHON_INSTALL_LAYOUT: deb_system ARTIFACT_NAME: "docs" @@ -20,7 +16,6 @@ env: jobs: build-docs: env: - PYTHONPATH: /usr/local/lib/python3/dist-packages:/usr/local/lib:/usr/local/dolfinx-real/lib/python3.10/dist-packages DEB_PYTHON_INSTALL_LAYOUT: deb_system runs-on: ubuntu-22.04 diff --git a/.github/workflows/check_formatting.yml b/.github/workflows/check_formatting.yml index 102ebe9..3920d5c 100644 --- a/.github/workflows/check_formatting.yml +++ b/.github/workflows/check_formatting.yml @@ -6,7 +6,6 @@ on: jobs: check-code: env: - PYTHONPATH: /usr/local/lib/python3/dist-packages:/usr/local/lib:/usr/local/dolfinx-real/lib/python3.10/dist-packages DEB_PYTHON_INSTALL_LAYOUT: deb_system runs-on: ubuntu-22.04 diff --git a/.github/workflows/create_legacy_data.yml b/.github/workflows/create_legacy_data.yml index d5d5f6f..fa7c127 100644 --- a/.github/workflows/create_legacy_data.yml +++ b/.github/workflows/create_legacy_data.yml @@ -15,7 +15,7 @@ jobs: data_dir: "legacy" runs-on: "ubuntu-22.04" - container: ghcr.io/scientificcomputing/fenics:2023-11-15 + container: ghcr.io/scientificcomputing/fenics:2024-02-19 steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/test_package.yml b/.github/workflows/test_package.yml index 102c58c..b3e5ff7 100644 --- a/.github/workflows/test_package.yml +++ b/.github/workflows/test_package.yml @@ -37,7 +37,6 @@ jobs: container: ghcr.io/fenics/dolfinx/dolfinx:nightly env: DEB_PYTHON_INSTALL_LAYOUT: deb_system - PYTHONPATH: /usr/local/lib/python3/dist-packages:/usr/local/lib:/usr/local/dolfinx-real/lib/python3.10/dist-packages steps: - uses: actions/checkout@v4 From 4b33809add1c741df2ae285a8de50e09ce23a979 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Sat, 2 Mar 2024 22:41:40 +0100 Subject: [PATCH 18/49] Use tmp-dir and tmp_path to store test files. (#78) * Use tmp-dir and tmp_path to store test files. * Fix typo * Fix paths for MPI by broadcasting from rank 0 * Add handling to orignal checkpoint as well (even if we do not use MPI directly there atm. 
* Add tmp_path to test_snapshot_checkpoint --------- Co-authored-by: Henrik Finsberg --- pyproject.toml | 2 +- tests/conftest.py | 35 ++++++----- tests/test_checkpointing_vector.py | 8 +-- tests/test_mesh_writer.py | 9 +-- tests/test_meshtags.py | 15 +++-- tests/test_original_checkpoint.py | 93 +++++++++++------------------- tests/test_snapshot_checkpoint.py | 20 ++++--- 7 files changed, 81 insertions(+), 101 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index afe2b7b..231a703 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ requires = ["setuptools>=61.0.0", "wheel"] [project] name = "adios4dolfinx" version = "0.8.0.dev0" -description = "Wrappers for reading/writing DOLFINx meshes/functions with ADIOS2" +description = "Checkpointing functionality for DOLFINx meshes/functions with ADIOS2" authors = [{ name = "Jørgen S. Dokken", email = "dokken@simula.no" }] license = { file = "LICENSE" } readme = "README.md" diff --git a/tests/conftest.py b/tests/conftest.py index eadbc47..0e16ddb 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,5 +1,3 @@ -import pathlib - from mpi4py import MPI import dolfinx @@ -20,7 +18,7 @@ def cluster(): @pytest.fixture(scope="function") -def write_function(): +def write_function(tmp_path): def _write_function(mesh, el, f, dtype, name="uh", append: bool = False) -> str: V = dolfinx.fem.functionspace(mesh, el) uh = dolfinx.fem.Function(V, dtype=dtype) @@ -35,9 +33,10 @@ def _write_function(mesh, el, f, dtype, name="uh", append: bool = False) -> str: .replace("[", "") .replace("]", "") ) - + # Consistent tmp dir across processes + f_path = MPI.COMM_WORLD.bcast(tmp_path, root=0) file_hash = f"{el_hash}_{np.dtype(dtype).name}" - filename = pathlib.Path(f"output/mesh_{file_hash}.bp") + filename = f_path / f"mesh_{file_hash}.bp" if mesh.comm.size != 1: if not append: adios4dolfinx.write_mesh(filename, mesh) @@ -48,21 +47,20 @@ def _write_function(mesh, el, f, dtype, name="uh", append: bool = False) -> str: adios4dolfinx.write_mesh(filename, mesh) adios4dolfinx.write_function(filename, uh, time=0.0) - return file_hash + return filename return _write_function @pytest.fixture(scope="function") def read_function(): - def _read_function(comm, el, f, hash, dtype, name="uh"): - filename = f"output/mesh_{hash}.bp" + def _read_function(comm, el, f, path, dtype, name="uh"): engine = "BP4" - mesh = adios4dolfinx.read_mesh(filename, comm, engine, dolfinx.mesh.GhostMode.shared_facet) + mesh = adios4dolfinx.read_mesh(path, comm, engine, dolfinx.mesh.GhostMode.shared_facet) V = dolfinx.fem.functionspace(mesh, el) v = dolfinx.fem.Function(V, dtype=dtype) v.name = name - adios4dolfinx.read_function(filename, v, engine) + adios4dolfinx.read_function(path, v, engine) v_ex = dolfinx.fem.Function(V, dtype=dtype) v_ex.interpolate(f) @@ -94,7 +92,7 @@ def _get_dtype(in_dtype: np.dtype, is_complex: bool): @pytest.fixture(scope="function") -def write_function_time_dep(): +def write_function_time_dep(tmp_path): def _write_function_time_dep(mesh, el, f0, f1, t0, t1, dtype) -> str: V = dolfinx.fem.functionspace(mesh, el) uh = dolfinx.fem.Function(V, dtype=dtype) @@ -109,7 +107,9 @@ def _write_function_time_dep(mesh, el, f0, f1, t0, t1, dtype) -> str: .replace("]", "") ) file_hash = f"{el_hash}_{np.dtype(dtype).name}" - filename = pathlib.Path(f"output/mesh_{file_hash}.bp") + # Consistent tmp dir across processes + f_path = MPI.COMM_WORLD.bcast(tmp_path, root=0) + filename = f_path / f"mesh_{file_hash}.bp" if mesh.comm.size != 1: adios4dolfinx.write_mesh(filename, 
mesh) adios4dolfinx.write_function(filename, uh, time=t0) @@ -123,28 +123,27 @@ def _write_function_time_dep(mesh, el, f0, f1, t0, t1, dtype) -> str: uh.interpolate(f1) adios4dolfinx.write_function(filename, uh, time=t1) - return file_hash + return filename return _write_function_time_dep @pytest.fixture(scope="function") def read_function_time_dep(): - def _read_function_time_dep(comm, el, f0, f1, t0, t1, hash, dtype): - filename = f"output/mesh_{hash}.bp" + def _read_function_time_dep(comm, el, f0, f1, t0, t1, path, dtype): engine = "BP4" - mesh = adios4dolfinx.read_mesh(filename, comm, engine, dolfinx.mesh.GhostMode.shared_facet) + mesh = adios4dolfinx.read_mesh(path, comm, engine, dolfinx.mesh.GhostMode.shared_facet) V = dolfinx.fem.functionspace(mesh, el) v = dolfinx.fem.Function(V, dtype=dtype) - adios4dolfinx.read_function(filename, v, engine, time=t1) + adios4dolfinx.read_function(path, v, engine, time=t1) v_ex = dolfinx.fem.Function(V, dtype=dtype) v_ex.interpolate(f1) res = np.finfo(dtype).resolution assert np.allclose(v.x.array, v_ex.x.array, atol=10 * res, rtol=10 * res) - adios4dolfinx.read_function(filename, v, engine, time=t0) + adios4dolfinx.read_function(path, v, engine, time=t0) v_ex = dolfinx.fem.Function(V, dtype=dtype) v_ex.interpolate(f0) diff --git a/tests/test_checkpointing_vector.py b/tests/test_checkpointing_vector.py index 869647c..039ec23 100644 --- a/tests/test_checkpointing_vector.py +++ b/tests/test_checkpointing_vector.py @@ -67,9 +67,9 @@ def f(x): values[1] += 2j * x[0] return values - hash = write_function(mesh, el, f, f_dtype) + fname = write_function(mesh, el, f, f_dtype) MPI.COMM_WORLD.Barrier() - read_function(read_comm, el, f, hash, f_dtype) + read_function(read_comm, el, f, fname, f_dtype) @pytest.mark.parametrize("is_complex", [True, False]) @@ -93,9 +93,9 @@ def f(x): values[1] += 2j * np.cos(x[2]) return values - hash = write_function(mesh, el, f, dtype=f_dtype) + fname = write_function(mesh, el, f, dtype=f_dtype) MPI.COMM_WORLD.Barrier() - read_function(read_comm, el, f, hash, dtype=f_dtype) + read_function(read_comm, el, f, fname, dtype=f_dtype) @pytest.mark.parametrize("is_complex", [True, False]) diff --git a/tests/test_mesh_writer.py b/tests/test_mesh_writer.py index 26b5483..4902b35 100644 --- a/tests/test_mesh_writer.py +++ b/tests/test_mesh_writer.py @@ -1,4 +1,3 @@ -import pathlib import time from mpi4py import MPI @@ -14,10 +13,12 @@ @pytest.mark.parametrize("encoder, suffix", [("BP4", ".bp"), ("HDF5", ".h5")]) # , ("BP5", ".bp")]) # Deactivated, see: https://github.com/jorgensd/adios4dolfinx/issues/7 @pytest.mark.parametrize("ghost_mode", [dolfinx.mesh.GhostMode.shared_facet]) -def test_mesh_read_writer(encoder, suffix, ghost_mode): +def test_mesh_read_writer(encoder, suffix, ghost_mode, tmp_path): N = 25 - file = pathlib.Path(f"output/adios_mesh_{encoder}") - xdmf_file = pathlib.Path("output/xdmf_mesh") + # Consistent tmp dir across processes + fname = MPI.COMM_WORLD.bcast(tmp_path, root=0) + file = fname / f"adios_mesh_{encoder}" + xdmf_file = fname / "xdmf_mesh" mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, N, N, N, ghost_mode=ghost_mode) start = time.perf_counter() diff --git a/tests/test_meshtags.py b/tests/test_meshtags.py index bdbe768..16a6c86 100644 --- a/tests/test_meshtags.py +++ b/tests/test_meshtags.py @@ -95,12 +95,13 @@ def generate_reference_map( @pytest.mark.parametrize("read_mode", read_modes) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) -def test_checkpointing_meshtags_1D(mesh_1D, 
read_comm, read_mode):
+def test_checkpointing_meshtags_1D(mesh_1D, read_comm, read_mode, tmp_path):
     mesh = mesh_1D
 
     # Write unique mesh file for each combination of MPI communicator and dtype
     hash = f"{mesh.comm.size}_{mesh.geometry.x.dtype}"
-    filename = f"meshtags_1D_{hash}.bp"
+    fname = MPI.COMM_WORLD.bcast(tmp_path, root=0)
+    filename = fname / f"meshtags_1D_{hash}.bp"
 
     # If mesh communicator is more than a self communicator or serial write on all processes.
     # If serial or self communicator, only write on root rank
@@ -167,10 +168,11 @@ def test_checkpointing_meshtags_1D(mesh_1D, read_comm, read_mode):
 
 @pytest.mark.parametrize("read_mode", read_modes)
 @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD])
-def test_checkpointing_meshtags_2D(mesh_2D, read_comm, read_mode):
+def test_checkpointing_meshtags_2D(mesh_2D, read_comm, read_mode, tmp_path):
     mesh = mesh_2D
     hash = f"{mesh.comm.size}_{mesh.topology.cell_name()}_{mesh.geometry.x.dtype}"
-    filename = f"meshtags_1D_{hash}.bp"
+    fname = MPI.COMM_WORLD.bcast(tmp_path, root=0)
+    filename = fname / f"meshtags_2D_{hash}.bp"
     if mesh.comm.size != 1:
         adios4dolfinx.write_mesh(filename, mesh, engine="BP4")
     else:
@@ -221,10 +223,11 @@ def test_checkpointing_meshtags_2D(mesh_2D, read_comm, read_mode):
 
 @pytest.mark.parametrize("read_mode", read_modes)
 @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD])
-def test_checkpointing_meshtags_3D(mesh_3D, read_comm, read_mode):
+def test_checkpointing_meshtags_3D(mesh_3D, read_comm, read_mode, tmp_path):
     mesh = mesh_3D
     hash = f"{mesh.comm.size}_{mesh.topology.cell_name()}_{mesh.geometry.x.dtype}"
-    filename = f"meshtags_1D_{hash}.bp"
+    fname = MPI.COMM_WORLD.bcast(tmp_path, root=0)
+    filename = fname / f"meshtags_3D_{hash}.bp"
     if mesh.comm.size != 1:
         adios4dolfinx.write_mesh(filename, mesh, engine="BP4")
     else:
diff --git a/tests/test_original_checkpoint.py b/tests/test_original_checkpoint.py
index 35c40a9..d6f63b3 100644
--- a/tests/test_original_checkpoint.py
+++ b/tests/test_original_checkpoint.py
@@ -30,7 +30,7 @@
 
 @pytest.fixture(scope="module")
-def create_simplex_mesh_2D():
+def create_simplex_mesh_2D(tmp_path_factory):
     mesh = dolfinx.mesh.create_unit_square(
         MPI.COMM_WORLD,
         10,
@@ -38,14 +38,15 @@ def create_simplex_mesh_2D():
         cell_type=dolfinx.mesh.CellType.triangle,
         dtype=np.float64,
     )
-    fname = Path("output/original_mesh_2D_simplex.xdmf")
+    fname = tmp_path_factory.mktemp("output") / "original_mesh_2D_simplex.xdmf"
+    fname = MPI.COMM_WORLD.bcast(fname, root=0)
     with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf:
         xdmf.write_mesh(mesh)
     return fname
 
 
 @pytest.fixture(scope="module")
-def create_simplex_mesh_3D():
+def create_simplex_mesh_3D(tmp_path_factory):
     mesh = dolfinx.mesh.create_unit_cube(
         MPI.COMM_WORLD,
         5,
@@ -54,14 +55,14 @@ def create_simplex_mesh_3D():
         cell_type=dolfinx.mesh.CellType.tetrahedron,
         dtype=np.float64,
     )
-    fname = Path("output/original_mesh_3D_simplex.xdmf")
+    fname = tmp_path_factory.mktemp("output") / "original_mesh_3D_simplex.xdmf"
     with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf:
         xdmf.write_mesh(mesh)
     return fname
 
 
 @pytest.fixture(scope="module")
-def create_non_simplex_mesh_2D():
+def create_non_simplex_mesh_2D(tmp_path_factory):
     mesh = dolfinx.mesh.create_unit_square(
         MPI.COMM_WORLD,
         10,
@@ -69,14 +70,14 @@ def create_non_simplex_mesh_2D():
         cell_type=dolfinx.mesh.CellType.quadrilateral,
         dtype=np.float64,
     )
-    fname = Path("output/original_mesh_2D_non_simplex.xdmf")
+    fname = tmp_path_factory.mktemp("output") / 
"original_mesh_2D_non_simplex.xdmf" with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf: xdmf.write_mesh(mesh) return fname @pytest.fixture(scope="module") -def create_non_simplex_mesh_3D(): +def create_non_simplex_mesh_3D(tmp_path_factory): mesh = dolfinx.mesh.create_unit_cube( MPI.COMM_WORLD, 5, @@ -85,27 +86,27 @@ def create_non_simplex_mesh_3D(): cell_type=dolfinx.mesh.CellType.hexahedron, dtype=np.float64, ) - fname = Path("output/original_mesh_3D_non_simplex.xdmf") + fname = tmp_path_factory.mktemp("output") / "original_mesh_3D_non_simplex.xdmf" with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf: xdmf.write_mesh(mesh) return fname @pytest.fixture(params=two_dim_combinations, scope="module") -def create_2D_mesh(request): +def create_2D_mesh(request, tmpdir_factory): dtype, cell_type = request.param mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 5, 7, cell_type=cell_type, dtype=dtype) - fname = Path("output/original_mesh_2D_{dtype}_{cell_type}.xdmf") + fname = Path(tmpdir_factory.mktemp("output")) / f"original_mesh_2D_{dtype}_{cell_type}.xdmf" with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf: xdmf.write_mesh(mesh) return fname @pytest.fixture(params=three_dim_combinations, scope="module") -def create_3D_mesh(request): +def create_3D_mesh(request, tmpdir_factory): dtype, cell_type = request.param mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, 5, 7, 3, cell_type=cell_type, dtype=dtype) - fname = Path("output/original_mesh_3D_{dtype}_{cell_type}.xdmf") + fname = Path(tmpdir_factory.mktemp("output")) / f"original_mesh_3D_{dtype}_{cell_type}.xdmf" with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf: xdmf.write_mesh(mesh) return fname @@ -118,6 +119,7 @@ def write_function_original( f: Callable[[np.ndarray], np.ndarray], dtype: np.dtype, name: str, + path: Path, ) -> Path: """Convenience function for writing function to file on the original input mesh""" V = dolfinx.fem.functionspace(mesh, el) @@ -135,8 +137,7 @@ def write_function_original( ) file_hash = f"{el_hash}_{np.dtype(dtype).name}" - filename = Path(f"output/mesh_{file_hash}.bp") - + filename = path / f"mesh_{file_hash}.bp" if write_mesh: adios4dolfinx.write_mesh_input_order(filename, mesh) adios4dolfinx.write_function_on_input_mesh(filename, uh, time=0.0) @@ -198,6 +199,7 @@ def write_function_vector( f: Callable[[np.ndarray], np.ndarray], dtype: np.dtype, name: str, + dir: Path, ) -> Path: """Convenience function for writing function to file on the original input mesh""" from mpi4py import MPI @@ -226,7 +228,7 @@ def write_function_vector( ) file_hash = f"{el_hash}_{np.dtype(dtype).name}" - filename = Path(f"output/mesh_{file_hash}.bp") + filename = dir / f"mesh_{file_hash}.bp" if write_mesh: adios4dolfinx.write_mesh_input_order(filename, mesh) @@ -274,7 +276,7 @@ def read_function_vector( @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("write_mesh", [True, False]) def test_read_write_P_2D( - write_mesh, family, degree, is_complex, create_2D_mesh, cluster, get_dtype + write_mesh, family, degree, is_complex, create_2D_mesh, cluster, get_dtype, tmp_path ): fname = create_2D_mesh with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "r") as xdmf: @@ -299,13 +301,12 @@ def f(x): values[1] += 2j * x[0] return values - hash = write_function_original(write_mesh, mesh, el, f, f_dtype, "u_original") + hash = write_function_original(write_mesh, mesh, el, f, f_dtype, "u_original", tmp_path) if write_mesh: - mesh_fname = fname - else: mesh_fname = hash - + else: + mesh_fname 
= fname query = cluster[:].apply_async( read_function_original, mesh_fname, hash, "u_original", family, degree, f, f_dtype ) @@ -319,7 +320,7 @@ def f(x): @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("write_mesh", [True, False]) def test_read_write_P_3D( - write_mesh, family, degree, is_complex, create_3D_mesh, cluster, get_dtype + write_mesh, family, degree, is_complex, create_3D_mesh, cluster, get_dtype, tmp_path ): fname = create_3D_mesh with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "r") as xdmf: @@ -344,13 +345,13 @@ def f(x): values[2] += 2j return values - hash = write_function_original(write_mesh, mesh, el, f, f_dtype, "u_original") + hash = write_function_original(write_mesh, mesh, el, f, f_dtype, "u_original", tmp_path) MPI.COMM_WORLD.Barrier() if write_mesh: - mesh_fname = fname - else: mesh_fname = hash + else: + mesh_fname = fname query = cluster[:].apply_async( read_function_original, mesh_fname, hash, "u_original", family, degree, f, f_dtype @@ -365,7 +366,7 @@ def f(x): @pytest.mark.parametrize("family", ["N1curl", "RT"]) @pytest.mark.parametrize("degree", [1, 4]) def test_read_write_2D_vector_simplex( - write_mesh, family, degree, is_complex, create_simplex_mesh_2D, cluster, get_dtype + write_mesh, family, degree, is_complex, create_simplex_mesh_2D, cluster, get_dtype, tmp_path ): fname = create_simplex_mesh_2D @@ -381,14 +382,7 @@ def f(x): return values query = cluster[:].apply_async( - write_function_vector, - write_mesh, - fname, - family, - degree, - f, - f_dtype, - "u_original", + write_function_vector, write_mesh, fname, family, degree, f, f_dtype, "u_original", tmp_path ) query.wait() assert query.successful(), query.error @@ -409,7 +403,7 @@ def f(x): @pytest.mark.parametrize("family", ["N1curl", "RT"]) @pytest.mark.parametrize("degree", [1, 4]) def test_read_write_3D_vector_simplex( - write_mesh, family, degree, is_complex, create_simplex_mesh_3D, cluster, get_dtype + write_mesh, family, degree, is_complex, create_simplex_mesh_3D, cluster, get_dtype, tmp_path ): fname = create_simplex_mesh_3D @@ -426,14 +420,7 @@ def f(x): return values query = cluster[:].apply_async( - write_function_vector, - write_mesh, - fname, - family, - degree, - f, - f_dtype, - "u_original", + write_function_vector, write_mesh, fname, family, degree, f, f_dtype, "u_original", tmp_path ) query.wait() assert query.successful(), query.error @@ -454,7 +441,7 @@ def f(x): @pytest.mark.parametrize("family", ["RTCF"]) @pytest.mark.parametrize("degree", [1, 2, 3]) def test_read_write_2D_vector_non_simplex( - write_mesh, family, degree, is_complex, create_non_simplex_mesh_2D, cluster, get_dtype + write_mesh, family, degree, is_complex, create_non_simplex_mesh_2D, cluster, get_dtype, tmp_path ): fname = create_non_simplex_mesh_2D @@ -470,14 +457,7 @@ def f(x): return values query = cluster[:].apply_async( - write_function_vector, - write_mesh, - fname, - family, - degree, - f, - f_dtype, - "u_original", + write_function_vector, write_mesh, fname, family, degree, f, f_dtype, "u_original", tmp_path ) query.wait() assert query.successful(), query.error @@ -498,7 +478,7 @@ def f(x): @pytest.mark.parametrize("family", ["NCF"]) @pytest.mark.parametrize("degree", [1, 4]) def test_read_write_3D_vector_non_simplex( - write_mesh, family, degree, is_complex, create_non_simplex_mesh_3D, cluster, get_dtype + write_mesh, family, degree, is_complex, create_non_simplex_mesh_3D, cluster, get_dtype, tmp_path ): fname = create_non_simplex_mesh_3D @@ -514,14 +494,7 @@ def f(x): return values 
query = cluster[:].apply_async( - write_function_vector, - write_mesh, - fname, - family, - degree, - f, - f_dtype, - "u_original", + write_function_vector, write_mesh, fname, family, degree, f, f_dtype, "u_original", tmp_path ) query.wait() assert query.successful(), query.error diff --git a/tests/test_snapshot_checkpoint.py b/tests/test_snapshot_checkpoint.py index 2ce1aea..0ad238b 100644 --- a/tests/test_snapshot_checkpoint.py +++ b/tests/test_snapshot_checkpoint.py @@ -24,7 +24,7 @@ "cell_type, family", [(triangle, "N1curl"), (triangle, "RT"), (quad, "RTCF")] ) @pytest.mark.parametrize("degree", [1, 4]) -def test_read_write_2D(family, degree, cell_type): +def test_read_write_2D(family, degree, cell_type, tmp_path): mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 10, 10, cell_type=cell_type) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) @@ -35,7 +35,8 @@ def f(x): u = dolfinx.fem.Function(V) u.interpolate(f) - file = Path("snapshot_2D_vs.bp") + fname = MPI.COMM_WORLD.bcast(tmp_path, root=0) + file = fname / Path("snapshot_2D_vs.bp") snapshot_checkpoint(u, file, adios2.Mode.Write) v = dolfinx.fem.Function(V) @@ -45,7 +46,7 @@ def f(x): @pytest.mark.parametrize("cell_type, family", [(tetra, "N1curl"), (tetra, "RT"), (hex, "NCF")]) @pytest.mark.parametrize("degree", [1, 4]) -def test_read_write_3D(family, degree, cell_type): +def test_read_write_3D(family, degree, cell_type, tmp_path): mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, 3, 3, 3, cell_type=cell_type) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) @@ -56,7 +57,8 @@ def f(x): u = dolfinx.fem.Function(V) u.interpolate(f) - file = Path("snapshot_3D_vs.bp") + fname = MPI.COMM_WORLD.bcast(tmp_path, root=0) + file = fname / Path("snapshot_3D_vs.bp") snapshot_checkpoint(u, file, adios2.Mode.Write) v = dolfinx.fem.Function(V) @@ -69,7 +71,7 @@ def f(x): ) @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) -def test_read_write_P_2D(family, degree, cell_type): +def test_read_write_P_2D(family, degree, cell_type, tmp_path): mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 5, 5, cell_type=cell_type) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree, shape=(mesh.geometry.dim,)) @@ -80,7 +82,8 @@ def f(x): u = dolfinx.fem.Function(V) u.interpolate(f) - file = Path("snapshot_2D_p.bp") + fname = MPI.COMM_WORLD.bcast(tmp_path, root=0) + file = fname / Path("snapshot_2D_p.bp") snapshot_checkpoint(u, file, adios2.Mode.Write) v = dolfinx.fem.Function(V) @@ -93,7 +96,7 @@ def f(x): ) @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) -def test_read_write_P_3D(family, degree, cell_type): +def test_read_write_P_3D(family, degree, cell_type, tmp_path): mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, 5, 5, 5, cell_type=cell_type) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree, shape=(mesh.geometry.dim,)) @@ -104,7 +107,8 @@ def f(x): u = dolfinx.fem.Function(V) u.interpolate(f) - file = Path("snapshot_3D_p.bp") + fname = MPI.COMM_WORLD.bcast(tmp_path, root=0) + file = fname / Path("snapshot_3D_p.bp") snapshot_checkpoint(u, file, adios2.Mode.Write) v = dolfinx.fem.Function(V) From 0462452d1050d62711e05715973c934aa181cc69 Mon Sep 17 00:00:00 2001 From: Henrik Finsberg Date: Sun, 3 Mar 2024 06:52:43 +0100 Subject: [PATCH 19/49] Make it possible to pass name to meshtag (#79) --- src/adios4dolfinx/checkpointing.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 
deletions(-)

diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py
index 8d7bd3f..e2b1ecf 100644
--- a/src/adios4dolfinx/checkpointing.py
+++ b/src/adios4dolfinx/checkpointing.py
@@ -51,6 +51,7 @@ def write_meshtags(
     mesh: dolfinx.mesh.Mesh,
     meshtags: dolfinx.mesh.MeshTags,
     engine: str = "BP4",
+    meshtag_name: str | None = None,
 ):
     """
     Write meshtags associated with input mesh to file.
@@ -64,6 +65,7 @@ def write_meshtags(
         mesh: The mesh associated with the meshtags
         meshtags: The meshtags to write to file
         engine: Adios2 Engine
+        meshtag_name: Name to store the meshtags under. If None, `meshtags.name` is used.
     """
     tag_entities = meshtags.indices
     dim = meshtags.dim
@@ -84,6 +86,8 @@ def write_meshtags(
 
     indices = mesh.geometry.index_map().local_to_global(entities_to_geometry.reshape(-1))
 
+    name = meshtag_name or meshtags.name
+
     adios = adios2.ADIOS(mesh.comm)
     with ADIOSFile(
         adios=adios,
@@ -94,7 +98,7 @@ def write_meshtags(
     ) as adios_file:
         # Write meshtag topology
         topology_var = adios_file.io.DefineVariable(
-            meshtags.name + "_topology",
+            name + "_topology",
             indices,
             shape=[global_num_tag_entities, num_dofs_per_entity],
             start=[local_start, 0],
@@ -104,7 +108,7 @@ def write_meshtags(
         # Write meshtag values
         values_var = adios_file.io.DefineVariable(
-            meshtags.name + "_values",
+            name + "_values",
             local_values,
             shape=[global_num_tag_entities],
             start=[local_start],
@@ -113,9 +117,7 @@ def write_meshtags(
         adios_file.file.Put(values_var, local_values, adios2.Mode.Sync)
 
         # Write meshtag dim
-        adios_file.io.DefineAttribute(
-            meshtags.name + "_dim", np.array([meshtags.dim], dtype=np.uint8)
-        )
+        adios_file.io.DefineAttribute(name + "_dim", np.array([meshtags.dim], dtype=np.uint8))

From 9a8d4c4125a10eb3a63faab85d9b46867de78677 Mon Sep 17 00:00:00 2001
From: Henrik Finsberg
Date: Sun, 3 Mar 2024 06:52:57 +0100
Subject: [PATCH 20/49] Make it possible to read and write attributes (#80)

* Make it possible to read and write attributes

* Test reading and writing two groups

* Add missing read_attributes to __all__
---
 src/adios4dolfinx/__init__.py      |  4 ++
 src/adios4dolfinx/checkpointing.py | 70 ++++++++++++++++++++++++++++++
 tests/test_attributes.py           | 38 ++++++++++++++++
 3 files changed, 112 insertions(+)
 create mode 100644 tests/test_attributes.py

diff --git a/src/adios4dolfinx/__init__.py b/src/adios4dolfinx/__init__.py
index 1c97857..dd314d0 100644
--- a/src/adios4dolfinx/__init__.py
+++ b/src/adios4dolfinx/__init__.py
@@ -8,9 +8,11 @@
 from importlib.metadata import metadata
 
 from .checkpointing import (
+    read_attributes,
     read_function,
     read_mesh,
     read_meshtags,
+    write_attributes,
     write_function,
     write_mesh,
     write_meshtags,
@@ -38,4 +40,6 @@
     "snapshot_checkpoint",
     "write_function_on_input_mesh",
     "write_mesh_input_order",
+    "write_attributes",
+    "read_attributes",
 ]
diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py
index e2b1ecf..55fc7b7 100644
--- a/src/adios4dolfinx/checkpointing.py
+++ b/src/adios4dolfinx/checkpointing.py
@@ -43,9 +43,79 @@
     "write_mesh",
     "read_meshtags",
     "write_meshtags",
+    "read_attributes",
+    "write_attributes",
 ]
 
 
+def write_attributes(
+    filename: Path | str,
+    comm: MPI.Intracomm,
+    name: str,
+    attributes: dict[str, np.ndarray],
+    engine: str = "BP4",
+):
+    """Write attributes to file using ADIOS2. 
+ + Args: + filename: Path to file to write to + comm: MPI communicator used in storage + name: Name of the attributes + attributes: Dictionary of attributes to write to file + engine: ADIOS2 engine to use + """ + adios = adios2.ADIOS(comm) + with ADIOSFile( + adios=adios, + filename=filename, + mode=adios2.Mode.Append, + engine=engine, + io_name="AttributesWriter", + ) as adios_file: + adios_file.file.BeginStep() + + for k, v in attributes.items(): + adios_file.io.DefineAttribute(f"{name}_{k}", v) + + adios_file.file.PerformPuts() + adios_file.file.EndStep() + + +def read_attributes( + filename: Path | str, + comm: MPI.Intracomm, + name: str, + engine: str = "BP4", +) -> dict[str, np.ndarray]: + """Read attributes from file using ADIOS2. + + Args: + filename: Path to file to read from + comm: MPI communicator used in storage + name: Name of the attributes + engine: ADIOS2 engine to use + Returns: + The attributes + """ + + adios = adios2.ADIOS(comm) + with ADIOSFile( + adios=adios, + filename=filename, + mode=adios2.Mode.Read, + engine=engine, + io_name="AttributesReader", + ) as adios_file: + adios_file.file.BeginStep() + attributes = {} + for k in adios_file.io.AvailableAttributes().keys(): + if k.startswith(f"{name}_"): + a = adios_file.io.InquireAttribute(k) + attributes[k[len(name) + 1 :]] = a.Data() + adios_file.file.EndStep() + return attributes + + def write_meshtags( filename: Path | str, mesh: dolfinx.mesh.Mesh, diff --git a/tests/test_attributes.py b/tests/test_attributes.py new file mode 100644 index 0000000..cda1c45 --- /dev/null +++ b/tests/test_attributes.py @@ -0,0 +1,38 @@ +from pathlib import Path + +from mpi4py import MPI + +import numpy as np +import pytest + +import adios4dolfinx + + +@pytest.mark.parametrize("comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) +def test_read_write_attributes(comm, tmp_path): + attributes1 = { + "a": np.array([1, 2, 3], dtype=np.uint8), + "b": np.array([4, 5], dtype=np.uint8), + } + attributes2 = { + "c": np.array([6], dtype=np.uint8), + "d": np.array([7, 8, 9, 10], dtype=np.uint8), + } + fname = MPI.COMM_WORLD.bcast(tmp_path, root=0) + file = fname / Path("attributes.bp") + + adios4dolfinx.write_attributes(comm=comm, filename=file, name="group1", attributes=attributes1) + adios4dolfinx.write_attributes(comm=comm, filename=file, name="group2", attributes=attributes2) + MPI.COMM_WORLD.Barrier() + loaded_attributes1 = adios4dolfinx.read_attributes(comm=comm, filename=file, name="group1") + loaded_attributes2 = adios4dolfinx.read_attributes(comm=comm, filename=file, name="group2") + + for k, v in loaded_attributes1.items(): + assert np.allclose(v, attributes1[k]) + for k, v in attributes1.items(): + assert np.allclose(v, loaded_attributes1[k]) + + for k, v in loaded_attributes2.items(): + assert np.allclose(v, attributes2[k]) + for k, v in attributes2.items(): + assert np.allclose(v, loaded_attributes2[k]) From 5a72057f53a3b32af4c01471e2fc21423ad2b624 Mon Sep 17 00:00:00 2001 From: jorgensd Date: Sun, 3 Mar 2024 08:49:50 +0100 Subject: [PATCH 21/49] Fix init ruff formatting --- src/adios4dolfinx/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/adios4dolfinx/__init__.py b/src/adios4dolfinx/__init__.py index dd314d0..f17683b 100644 --- a/src/adios4dolfinx/__init__.py +++ b/src/adios4dolfinx/__init__.py @@ -5,6 +5,7 @@ # SPDX-License-Identifier: MIT """Top-level package for ADIOS2Wrappers.""" + from importlib.metadata import metadata from .checkpointing import ( From 1d65116184455d09e3377547ad7ba3dc12aec30a Mon Sep 17 00:00:00 2001 
From 1d65116184455d09e3377547ad7ba3dc12aec30a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Mon, 4 Mar 2024 15:20:17 +0100
Subject: [PATCH 22/49] Add read from partition (#81)

* Add read from partition. Addresses "Add read in partitioning option for
  adios4dolfinx" (#27)

* Rename function to reflect what it is actually doing

* Mypy assertion checks

* Add text to readme

* Apply suggestions from code review

Co-authored-by: Henrik Finsberg

* Add info regarding ghost mode and read partition

---------

Co-authored-by: Henrik Finsberg
---
 README.md                                |  1 +
 src/adios4dolfinx/adios2_helpers.py      | 12 ++--
 src/adios4dolfinx/checkpointing.py       | 72 ++++++++++++++++++++++--
 src/adios4dolfinx/original_checkpoint.py |  9 +++
 src/adios4dolfinx/structures.py          |  8 +++
 src/adios4dolfinx/writers.py             | 28 +++++++++
 tests/test_mesh_writer.py                | 59 ++++++++++++++++---
 7 files changed, 169 insertions(+), 20 deletions(-)

diff --git a/README.md b/README.md
index 98524e2..8c0d451 100644
--- a/README.md
+++ b/README.md
@@ -39,6 +39,7 @@ python3 -m pip install git+https://github.com/jorgensd/adios4dolfinx@v0.7.2
 - Reading and writing meshtags associated to meshes `adios4dolfinx.read/write_meshtags`
 - Reading checkpoints for any element (serial and parallel, arbitrary number of functions and timesteps per file). Use `adios4dolfinx.read/write_function`.
 - Writing standalone function checkpoints relating to "original meshes", i.e. meshes read from `XDMFFile`. Use `adios4dolfinx.write_function_on_input_mesh` for this.
+- Store mesh partitioning and re-read the mesh with this information, avoiding calling SCOTCH, Kahip or Parmetis.

 > [!IMPORTANT]
 > For checkpoints written with `write_function` to be valid, you first have to store the mesh with `write_mesh` to the checkpoint file.

diff --git a/src/adios4dolfinx/adios2_helpers.py b/src/adios4dolfinx/adios2_helpers.py
index 5f394a2..05bfd70 100644
--- a/src/adios4dolfinx/adios2_helpers.py
+++ b/src/adios4dolfinx/adios2_helpers.py
@@ -25,7 +25,7 @@ def resolve_adios_scope(adios2):
 Helpers reading/writing data with ADIOS2
 """

-__all__ = ["read_array", "read_dofmap", "read_cell_perms", "adios_to_numpy_dtype"]
+__all__ = ["read_array", "read_adjacency_list", "read_cell_perms", "adios_to_numpy_dtype"]

 adios_to_numpy_dtype = {
     "float": np.float32,
@@ -123,7 +123,7 @@ def read_cell_perms(
     return in_perm


-def read_dofmap(
+def read_adjacency_list(
     adios: adios2.ADIOS,
     comm: MPI.Intracomm,
     filename: Path | str,
@@ -131,10 +131,10 @@ def read_dofmap(
     dofmap_offsets: str,
     num_cells_global: np.int64,
     engine: str,
-) -> dolfinx.cpp.graph.AdjacencyList_int64:
+) -> dolfinx.cpp.graph.AdjacencyList_int64 | dolfinx.cpp.graph.AdjacencyList_int32:
     """
-    Read dofmap with given communicator,
-    split in continuous chunks based on number of cells in the mesh (global).
+    Read an adjacency-list from an ADIOS file with given communicator.
+    The adjacency list is split into a flat array (data) and its corresponding offset.
Args: adios: The ADIOS instance @@ -195,8 +195,6 @@ def read_dofmap( cell_dofs.SetSelection([[in_offsets[0]], [in_offsets[-1] - in_offsets[0]]]) in_dofmap = np.empty(in_offsets[-1] - in_offsets[0], dtype=cell_dofs.Type().strip("_t")) adios_file.file.Get(cell_dofs, in_dofmap, adios2.Mode.Sync) - - in_dofmap = in_dofmap.astype(np.int64) in_offsets -= in_offsets[0] adios_file.file.EndStep() diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py index 55fc7b7..b19c716 100644 --- a/src/adios4dolfinx/checkpointing.py +++ b/src/adios4dolfinx/checkpointing.py @@ -19,9 +19,9 @@ from .adios2_helpers import ( ADIOSFile, adios_to_numpy_dtype, + read_adjacency_list, read_array, read_cell_perms, - read_dofmap, resolve_adios_scope, ) from .comm_helpers import ( @@ -338,7 +338,7 @@ def read_function( else: dofmap_path = f"{name}_dofmap" xdofmap_path = f"{name}_XDofmap" - input_dofmap = read_dofmap( + input_dofmap = read_adjacency_list( adios, comm, filename, dofmap_path, xdofmap_path, num_cells_global, engine ) # Compute owner of dofs in dofmap @@ -422,6 +422,7 @@ def read_mesh( comm: MPI.Intracomm, engine: str = "BP4", ghost_mode: dolfinx.mesh.GhostMode = dolfinx.mesh.GhostMode.shared_facet, + read_from_partition: int = False, ) -> dolfinx.mesh.Mesh: """ Read an ADIOS2 mesh into DOLFINx. @@ -430,7 +431,9 @@ def read_mesh( filename: Path to input file comm: The MPI communciator to distribute the mesh over engine: ADIOS engine to use for reading (BP4, BP5 or HDF5) - ghost_mode: Ghost mode to use for mesh + ghost_mode: Ghost mode to use for mesh. If `read_from_partition` + is set to `True` this option is ignored. + read_from_partition: Read mesh with partition from file Returns: The distributed mesh """ @@ -487,6 +490,15 @@ def read_mesh( adios_file.file.Get(topology, mesh_topology, adios2.Mode.Deferred) adios_file.file.PerformGets() + + # Check validity of partitioning information + if read_from_partition: + if "PartitionProcesses" not in adios_file.io.AvailableAttributes().keys(): + raise KeyError(f"Partitioning information not found in {filename}") + par_num_procs = adios_file.io.InquireAttribute("PartitionProcesses") + num_procs = par_num_procs.Data()[0] + if num_procs != comm.size: + raise ValueError(f"Number of processes in file ({num_procs})!=({comm.size=})") adios_file.file.EndStep() # Create DOLFINx mesh @@ -499,11 +511,24 @@ def read_mesh( dtype=mesh_geometry.dtype, ) domain = ufl.Mesh(element) - partitioner = dolfinx.cpp.mesh.create_cell_partitioner(ghost_mode) + + if read_from_partition: + partition_graph = read_adjacency_list( + adios, comm, filename, "PartitioningData", "PartitioningOffset", shape[0], engine + ) + + def partitioner(comm: MPI.Intracomm, n, m, topo): + assert len(partition_graph.offsets) - 1 == topo.num_nodes + return partition_graph + else: + partitioner = dolfinx.cpp.mesh.create_cell_partitioner(ghost_mode) + return dolfinx.mesh.create_mesh(comm, mesh_topology, mesh_geometry, domain, partitioner) -def write_mesh(filename: Path, mesh: dolfinx.mesh.Mesh, engine: str = "BP4"): +def write_mesh( + filename: Path, mesh: dolfinx.mesh.Mesh, engine: str = "BP4", store_partition_info: bool = False +): """ Write a mesh to specified ADIOS2 format, see: https://adios2.readthedocs.io/en/stable/engines/engines.html @@ -513,6 +538,7 @@ def write_mesh(filename: Path, mesh: dolfinx.mesh.Mesh, engine: str = "BP4"): filename: Path to save mesh (without file-extension) mesh: The mesh to write to file engine: Adios2 Engine + store_partition_info: Store mesh 
partitioning (including ghosting) to file """ num_xdofs_local = mesh.geometry.index_map().size_local num_xdofs_global = mesh.geometry.index_map().size_global @@ -534,6 +560,36 @@ def write_mesh(filename: Path, mesh: dolfinx.mesh.Mesh, engine: str = "BP4"): g_imap.local_to_global(g_dmap[:num_cells_local, :].reshape(-1)) ).reshape(dofs_out.shape) + if store_partition_info: + partition_processes = mesh.comm.size + + # Get partitioning + cell_map = mesh.topology.index_map(mesh.topology.dim).index_to_dest_ranks() + num_cells_local = mesh.topology.index_map(mesh.topology.dim).size_local + cell_offsets = cell_map.offsets[: num_cells_local + 1] + if cell_offsets[-1] == 0: + cell_array = np.empty(0, dtype=np.int32) + else: + cell_array = cell_map.array[cell_offsets[-1]] + + # Compute adjacency with current process as first entry + ownership_array = np.full(num_cells_local + cell_offsets[-1], -1, dtype=np.int32) + ownership_offset = cell_offsets + np.arange(len(cell_offsets), dtype=np.int32) + ownership_array[ownership_offset[:-1]] = mesh.comm.rank + insert_position = np.flatnonzero(ownership_array == -1) + ownership_array[insert_position] = cell_array + + partition_map = dolfinx.common.IndexMap(mesh.comm, ownership_array.size) + ownership_offset += partition_map.local_range[0] + partition_range = partition_map.local_range + partition_global = partition_map.size_global + else: + partition_processes = None + ownership_array = None + ownership_offset = None + partition_range = None + partition_global = None + mesh_data = MeshData( local_geometry=mesh.geometry.x[:num_xdofs_local, :gdim].copy(), local_geometry_pos=geometry_range, @@ -544,6 +600,12 @@ def write_mesh(filename: Path, mesh: dolfinx.mesh.Mesh, engine: str = "BP4"): cell_type=mesh.topology.cell_name(), degree=mesh.geometry.cmap.degree, lagrange_variant=mesh.geometry.cmap.variant, + store_partition=store_partition_info, + partition_processes=partition_processes, + ownership_array=ownership_array, + ownership_offset=ownership_offset, + partition_range=partition_range, + partition_global=partition_global, ) # NOTE: Mode will become input again once we have variable geometry diff --git a/src/adios4dolfinx/original_checkpoint.py b/src/adios4dolfinx/original_checkpoint.py index 41ac274..b373bcc 100644 --- a/src/adios4dolfinx/original_checkpoint.py +++ b/src/adios4dolfinx/original_checkpoint.py @@ -176,6 +176,9 @@ def create_original_mesh_data(mesh: dolfinx.mesh.Mesh) -> MeshData: geometry = geometry[:, :gdim].copy() assert local_node_range[1] - local_node_range[0] == geometry.shape[0] cmap = mesh.geometry.cmap + + # NOTE: Could in theory store partitioning information, but would not work nicely + # as one would need to read this data rather than the xdmffile. 
return MeshData( local_geometry=geometry, local_geometry_pos=local_node_range, @@ -186,6 +189,12 @@ def create_original_mesh_data(mesh: dolfinx.mesh.Mesh) -> MeshData: cell_type=mesh.topology.cell_name(), degree=cmap.degree, lagrange_variant=cmap.variant, + store_partition=False, + partition_processes=None, + ownership_array=None, + ownership_offset=None, + partition_range=None, + partition_global=None, ) diff --git a/src/adios4dolfinx/structures.py b/src/adios4dolfinx/structures.py index cbb9558..b326a32 100644 --- a/src/adios4dolfinx/structures.py +++ b/src/adios4dolfinx/structures.py @@ -31,6 +31,14 @@ class MeshData: degree: int lagrange_variant: int + # Partitioning_information + store_partition: bool + partition_processes: int | None # Number of processes in partition + ownership_array: npt.NDArray[np.int32] | None # Ownership array for cells + ownership_offset: npt.NDArray[np.int32] | None # Ownership offset for cells + partition_range: tuple[int, int] | None # Local insert position for partitioning information + partition_global: int | None + @dataclass class FunctionData: diff --git a/src/adios4dolfinx/writers.py b/src/adios4dolfinx/writers.py index 5f9ff48..0c23b82 100644 --- a/src/adios4dolfinx/writers.py +++ b/src/adios4dolfinx/writers.py @@ -76,6 +76,34 @@ def write_mesh( ) adios_file.file.Put(dvar, mesh.local_topology) + + # Add partitioning data + if mesh.store_partition: + assert mesh.partition_range is not None + par_data = adios_file.io.DefineVariable( + "PartitioningData", + mesh.ownership_array, + shape=[mesh.partition_global], + start=[mesh.partition_range[0]], + count=[ + mesh.partition_range[1] - mesh.partition_range[0], + ], + ) + adios_file.file.Put(par_data, mesh.ownership_array) + assert mesh.ownership_offset is not None + par_offset = adios_file.io.DefineVariable( + "PartitioningOffset", + mesh.ownership_offset, + shape=[mesh.num_cells_global + 1], + start=[mesh.local_topology_pos[0]], + count=[mesh.local_topology_pos[1] - mesh.local_topology_pos[0] + 1], + ) + adios_file.file.Put(par_offset, mesh.ownership_offset) + assert mesh.partition_processes is not None + adios_file.io.DefineAttribute( + "PartitionProcesses", np.array([mesh.partition_processes], dtype=np.int32) + ) + adios_file.file.PerformPuts() adios_file.file.EndStep() diff --git a/tests/test_mesh_writer.py b/tests/test_mesh_writer.py index 4902b35..a6f7232 100644 --- a/tests/test_mesh_writer.py +++ b/tests/test_mesh_writer.py @@ -10,11 +10,13 @@ from adios4dolfinx import read_mesh, write_mesh -@pytest.mark.parametrize("encoder, suffix", [("BP4", ".bp"), ("HDF5", ".h5")]) -# , ("BP5", ".bp")]) # Deactivated, see: https://github.com/jorgensd/adios4dolfinx/issues/7 -@pytest.mark.parametrize("ghost_mode", [dolfinx.mesh.GhostMode.shared_facet]) -def test_mesh_read_writer(encoder, suffix, ghost_mode, tmp_path): - N = 25 +@pytest.mark.parametrize("encoder, suffix", [("BP4", ".bp"), ("HDF5", ".h5"), ("BP5", ".bp")]) +@pytest.mark.parametrize( + "ghost_mode", [dolfinx.mesh.GhostMode.shared_facet, dolfinx.mesh.GhostMode.none] +) +@pytest.mark.parametrize("store_partition", [True, False]) +def test_mesh_read_writer(encoder, suffix, ghost_mode, tmp_path, store_partition): + N = 3 # Consistent tmp dir across processes fname = MPI.COMM_WORLD.bcast(tmp_path, root=0) file = fname / f"adios_mesh_{encoder}" @@ -22,7 +24,7 @@ def test_mesh_read_writer(encoder, suffix, ghost_mode, tmp_path): mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, N, N, N, ghost_mode=ghost_mode) start = time.perf_counter() - 
write_mesh(file.with_suffix(suffix), mesh, encoder)
+    write_mesh(file.with_suffix(suffix), mesh, encoder, store_partition_info=store_partition)
     end = time.perf_counter()
     print(f"Write ADIOS2 mesh: {end-start}")

@@ -35,7 +37,45 @@
     mesh.comm.Barrier()
     start = time.perf_counter()
-    mesh_adios = read_mesh(file.with_suffix(suffix), MPI.COMM_WORLD, encoder, ghost_mode)
+    mesh_adios = read_mesh(
+        file.with_suffix(suffix), MPI.COMM_WORLD, encoder, ghost_mode, store_partition
+    )
+
+    if store_partition:
+
+        def compute_distance_matrix(points_A, points_B, tol=1e-12):
+            points_A_e = np.expand_dims(points_A, 1)
+            points_B_e = np.expand_dims(points_B, 0)
+            distances = np.sum(np.square(points_A_e - points_B_e), axis=2)
+            return distances < tol
+
+        cell_map = mesh.topology.index_map(mesh.topology.dim)
+        new_cell_map = mesh_adios.topology.index_map(mesh_adios.topology.dim)
+        assert cell_map.size_local == new_cell_map.size_local
+        assert cell_map.num_ghosts == new_cell_map.num_ghosts
+        midpoints = dolfinx.mesh.compute_midpoints(
+            mesh,
+            mesh.topology.dim,
+            np.arange(cell_map.size_local + cell_map.num_ghosts, dtype=np.int32),
+        )
+        new_midpoints = dolfinx.mesh.compute_midpoints(
+            mesh_adios,
+            mesh_adios.topology.dim,
+            np.arange(new_cell_map.size_local + new_cell_map.num_ghosts, dtype=np.int32),
+        )
+        # Check that all points owned by the initial mesh are owned by the new mesh
+        # (might be locally reordered)
+        owned_distances = compute_distance_matrix(
+            midpoints[: cell_map.size_local], new_midpoints[: new_cell_map.size_local]
+        )
+        np.testing.assert_allclose(np.sum(owned_distances, axis=1), 1)
+        # Check that all points that are ghosted in the original mesh are ghosted on the
+        # same process in the new mesh
+        ghost_distances = compute_distance_matrix(
+            midpoints[cell_map.size_local :], new_midpoints[new_cell_map.size_local :]
+        )
+        np.testing.assert_allclose(np.sum(ghost_distances, axis=1), 1)
+
     end = time.perf_counter()
     print(f"Read ADIOS2 mesh: {end-start}")
     mesh.comm.Barrier()
@@ -56,7 +96,10 @@
     )

     # Check that integration over different entities is consistent
-    for measure in [ufl.ds, ufl.dS, ufl.dx]:
+    measures = (
+        [ufl.ds, ufl.dx] if ghost_mode is dolfinx.mesh.GhostMode.none else [ufl.ds, ufl.dS, ufl.dx]
+    )
+    for measure in measures:
         c_adios = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh_adios)))
         c_ref = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh)))
         c_xdmf = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh_xdmf)))
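The partition round-trip added in this patch can be used along these lines. This is a minimal sketch under the assumption of a fixed MPI process count (e.g. `mpirun -n 2`); the file name is illustrative:

```python
from pathlib import Path

from mpi4py import MPI

import dolfinx

import adios4dolfinx

mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 10, 10)
# Store the mesh together with its current partitioning (including ghosting)
adios4dolfinx.write_mesh(Path("mesh.bp"), mesh, engine="BP4", store_partition_info=True)
# Re-reading with read_from_partition=True on the same number of processes reuses
# the stored partition instead of calling SCOTCH, Kahip or Parmetis; a different
# process count raises a ValueError
new_mesh = adios4dolfinx.read_mesh(
    Path("mesh.bp"), MPI.COMM_WORLD, engine="BP4", read_from_partition=True
)
```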
From 476549a1b4d2884fda5eb2418d869dbac8ae8332 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Mon, 4 Mar 2024 16:33:19 +0100
Subject: [PATCH 23/49] Add time dependent mesh checkpoint (#83)

* Add time dependent mesh checkpoint.

* Make geometry and timestamp the only time-dependent variables to reduce file size

* Add legacy flag to legacy test

* Fix a type hint + a typo in read from partition
---
 src/adios4dolfinx/checkpointing.py |  82 +++++++++++++++------
 src/adios4dolfinx/writers.py       | 107 ++++++++++++++++-----------
 tests/test_legacy_readers.py       |   2 +-
 tests/test_mesh_writer.py          | 103 +++++++++++++++++++------
 4 files changed, 202 insertions(+), 92 deletions(-)

diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py
index b19c716..9bf2c62 100644
--- a/src/adios4dolfinx/checkpointing.py
+++ b/src/adios4dolfinx/checkpointing.py
@@ -166,6 +166,8 @@ def write_meshtags(
         engine=engine,
         io_name="MeshTagWriter",
     ) as adios_file:
+        adios_file.file.BeginStep()
+
         # Write meshtag topology
         topology_var = adios_file.io.DefineVariable(
             name + "_topology",
@@ -422,7 +424,9 @@ def read_mesh(
     comm: MPI.Intracomm,
     engine: str = "BP4",
     ghost_mode: dolfinx.mesh.GhostMode = dolfinx.mesh.GhostMode.shared_facet,
-    read_from_partition: int = False,
+    time: float = 0.0,
+    legacy: bool = False,
+    read_from_partition: bool = False,
 ) -> dolfinx.mesh.Mesh:
     """
     Read an ADIOS2 mesh into DOLFINx.
@@ -433,6 +437,8 @@ def read_mesh(
         engine: ADIOS engine to use for reading (BP4, BP5 or HDF5)
         ghost_mode: Ghost mode to use for mesh. If `read_from_partition`
             is set to `True` this option is ignored.
+        time: Time stamp associated with mesh
+        legacy: Set to `True` if the checkpoint was written prior to the time-dependent mesh-writer
         read_from_partition: Read mesh with partition from file
     Returns:
         The distributed mesh
@@ -446,7 +452,26 @@
         engine=engine,
         io_name="MeshReader",
     ) as adios_file:
+        # Get time independent mesh variables (mesh topology and cell type info) first
         adios_file.file.BeginStep()
+        # Get mesh topology (distributed)
+        if "Topology" not in adios_file.io.AvailableVariables().keys():
+            raise KeyError(f"Mesh topology not found at Topology in {filename}")
+        topology = adios_file.io.InquireVariable("Topology")
+        shape = topology.Shape()
+        local_range = compute_local_range(comm, shape[0])
+        topology.SetSelection([[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]])
+        mesh_topology = np.empty((local_range[1] - local_range[0], shape[1]), dtype=np.int64)
+        adios_file.file.Get(topology, mesh_topology, adios2.Mode.Deferred)
+
+        # Check validity of partitioning information
+        if read_from_partition:
+            if "PartitionProcesses" not in adios_file.io.AvailableAttributes().keys():
+                raise KeyError(f"Partitioning information not found in {filename}")
+            par_num_procs = adios_file.io.InquireAttribute("PartitionProcesses")
+            num_procs = par_num_procs.Data()[0]
+            if num_procs != comm.size:
+                raise ValueError(f"Number of processes in file ({num_procs})!=({comm.size=})")

         # Get mesh cell type
         if "CellType" not in adios_file.io.AvailableAttributes().keys():
@@ -462,6 +487,29 @@
             raise KeyError(f"Mesh degree not found in {filename}")
         degree = adios_file.io.InquireAttribute("Degree").Data()[0]

+        if not legacy:
+            time_name = "MeshTime"
+            for i in range(adios_file.file.Steps()):
+                if i > 0:
+                    adios_file.file.BeginStep()
+                if time_name in adios_file.io.AvailableVariables().keys():
+                    arr = adios_file.io.InquireVariable(time_name)
+                    time_shape = arr.Shape()
+                    arr.SetSelection([[0], [time_shape[0]]])
+                    times = np.empty(time_shape[0], dtype=adios_to_numpy_dtype[arr.Type()])
+                    adios_file.file.Get(arr, times, adios2.Mode.Sync)
+                    if times[0] == time:
+                        break
+                if i == adios_file.file.Steps() - 1:
+                    raise KeyError(
f"No data associated with {time_name}={time} found in {filename}" + ) + + adios_file.file.EndStep() + + if time_name not in adios_file.io.AvailableVariables().keys(): + raise KeyError(f"No data associated with {time_name}={time} found in {filename}") + # Get mesh geometry if "Points" not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"Mesh coordinates not found at Points in {filename}") @@ -479,26 +527,7 @@ def read_mesh( dtype=adios_to_numpy_dtype[geometry.Type()], ) adios_file.file.Get(geometry, mesh_geometry, adios2.Mode.Deferred) - # Get mesh topology (distributed) - if "Topology" not in adios_file.io.AvailableVariables().keys(): - raise KeyError(f"Mesh topology not found at Topology in {filename}") - topology = adios_file.io.InquireVariable("Topology") - shape = topology.Shape() - local_range = compute_local_range(comm, shape[0]) - topology.SetSelection([[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]]) - mesh_topology = np.empty((local_range[1] - local_range[0], shape[1]), dtype=np.int64) - adios_file.file.Get(topology, mesh_topology, adios2.Mode.Deferred) - adios_file.file.PerformGets() - - # Check validity of partitioning information - if read_from_partition: - if "PartitionProcesses" not in adios_file.io.AvailableAttributes().keys(): - raise KeyError(f"Partitioning information not found in {filename}") - par_num_procs = adios_file.io.InquireAttribute("PartitionProcesses") - num_procs = par_num_procs.Data()[0] - if num_procs != comm.size: - raise ValueError(f"Number of processes in file ({num_procs})!=({comm.size=})") adios_file.file.EndStep() # Create DOLFINx mesh @@ -527,7 +556,12 @@ def partitioner(comm: MPI.Intracomm, n, m, topo): def write_mesh( - filename: Path, mesh: dolfinx.mesh.Mesh, engine: str = "BP4", store_partition_info: bool = False + filename: Path, + mesh: dolfinx.mesh.Mesh, + engine: str = "BP4", + mode: adios2.Mode = adios2.Mode.Write, + time: float = 0.0, + store_partition_info: bool = False, ): """ Write a mesh to specified ADIOS2 format, see: @@ -570,7 +604,7 @@ def write_mesh( if cell_offsets[-1] == 0: cell_array = np.empty(0, dtype=np.int32) else: - cell_array = cell_map.array[cell_offsets[-1]] + cell_array = cell_map.array[: cell_offsets[-1]] # Compute adjacency with current process as first entry ownership_array = np.full(num_cells_local + cell_offsets[-1], -1, dtype=np.int32) @@ -608,13 +642,13 @@ def write_mesh( partition_global=partition_global, ) - # NOTE: Mode will become input again once we have variable geometry _internal_mesh_writer( filename, mesh.comm, mesh_data, engine, - mode=adios2.Mode.Write, + mode=mode, + time=time, io_name="MeshWriter", ) diff --git a/src/adios4dolfinx/writers.py b/src/adios4dolfinx/writers.py index 0c23b82..26bf991 100644 --- a/src/adios4dolfinx/writers.py +++ b/src/adios4dolfinx/writers.py @@ -5,6 +5,7 @@ # SPDX-License-Identifier: MIT +import warnings from pathlib import Path from mpi4py import MPI @@ -24,6 +25,7 @@ def write_mesh( mesh: MeshData, engine: str = "BP4", mode: adios2.Mode = adios2.Mode.Write, + time: float = 0.0, io_name: str = "MeshWriter", ): """ @@ -43,6 +45,7 @@ def write_mesh( with ADIOSFile( adios=adios, filename=filename, mode=mode, engine=engine, io_name=io_name ) as adios_file: + adios_file.file.BeginStep() # Write geometry pointvar = adios_file.io.DefineVariable( "Points", @@ -53,56 +56,65 @@ def write_mesh( ) adios_file.file.Put(pointvar, mesh.local_geometry, adios2.Mode.Sync) - # Write celltype - adios_file.io.DefineAttribute("CellType", mesh.cell_type) - - # 
Write basix properties
-        adios_file.io.DefineAttribute("Degree", np.array([mesh.degree], dtype=np.int32))
-        adios_file.io.DefineAttribute(
-            "LagrangeVariant", np.array([mesh.lagrange_variant], dtype=np.int32)
-        )
-
-        # Write topology
-        num_dofs_per_cell = mesh.local_topology.shape[1]
-        dvar = adios_file.io.DefineVariable(
-            "Topology",
-            mesh.local_topology,
-            shape=[mesh.num_cells_global, num_dofs_per_cell],
-            start=[mesh.local_topology_pos[0], 0],
-            count=[
-                mesh.local_topology_pos[1] - mesh.local_topology_pos[0],
-                num_dofs_per_cell,
-            ],
-        )
-
-        adios_file.file.Put(dvar, mesh.local_topology)
-
-        # Add partitioning data
-        if mesh.store_partition:
-            assert mesh.partition_range is not None
-            par_data = adios_file.io.DefineVariable(
-                "PartitioningData",
-                mesh.ownership_array,
-                shape=[mesh.partition_global],
-                start=[mesh.partition_range[0]],
+        if mode == adios2.Mode.Write:
+            adios_file.io.DefineAttribute("CellType", mesh.cell_type)
+            adios_file.io.DefineAttribute("Degree", np.array([mesh.degree], dtype=np.int32))
+            adios_file.io.DefineAttribute(
+                "LagrangeVariant", np.array([mesh.lagrange_variant], dtype=np.int32)
+            )
+            # Write topology (only on first write, as the topology is constant)
+            num_dofs_per_cell = mesh.local_topology.shape[1]
+            dvar = adios_file.io.DefineVariable(
+                "Topology",
+                mesh.local_topology,
+                shape=[mesh.num_cells_global, num_dofs_per_cell],
+                start=[mesh.local_topology_pos[0], 0],
                 count=[
-                    mesh.partition_range[1] - mesh.partition_range[0],
+                    mesh.local_topology_pos[1] - mesh.local_topology_pos[0],
+                    num_dofs_per_cell,
                 ],
             )
-            adios_file.file.Put(par_data, mesh.ownership_array)
-            assert mesh.ownership_offset is not None
-            par_offset = adios_file.io.DefineVariable(
-                "PartitioningOffset",
-                mesh.ownership_offset,
-                shape=[mesh.num_cells_global + 1],
-                start=[mesh.local_topology_pos[0]],
-                count=[mesh.local_topology_pos[1] - mesh.local_topology_pos[0] + 1],
-            )
-            adios_file.file.Put(par_offset, mesh.ownership_offset)
-            assert mesh.partition_processes is not None
-            adios_file.io.DefineAttribute(
-                "PartitionProcesses", np.array([mesh.partition_processes], dtype=np.int32)
-            )
+            adios_file.file.Put(dvar, mesh.local_topology)
+
+            # Add partitioning data
+            if mesh.store_partition:
+                assert mesh.partition_range is not None
+                par_data = adios_file.io.DefineVariable(
+                    "PartitioningData",
+                    mesh.ownership_array,
+                    shape=[mesh.partition_global],
+                    start=[mesh.partition_range[0]],
+                    count=[
+                        mesh.partition_range[1] - mesh.partition_range[0],
+                    ],
+                )
+                adios_file.file.Put(par_data, mesh.ownership_array)
+                assert mesh.ownership_offset is not None
+                par_offset = adios_file.io.DefineVariable(
+                    "PartitioningOffset",
+                    mesh.ownership_offset,
+                    shape=[mesh.num_cells_global + 1],
+                    start=[mesh.local_topology_pos[0]],
+                    count=[mesh.local_topology_pos[1] - mesh.local_topology_pos[0] + 1],
+                )
+                adios_file.file.Put(par_offset, mesh.ownership_offset)
+                assert mesh.partition_processes is not None
+                adios_file.io.DefineAttribute(
+                    "PartitionProcesses", np.array([mesh.partition_processes], dtype=np.int32)
+                )
+        if mode == adios2.Mode.Append and mesh.store_partition:
+            warnings.warn("Partitioning data is not written in append mode")
+
+        # Add time step to file
+        t_arr = np.array([time], dtype=np.float64)
+        time_var = adios_file.io.DefineVariable(
+            "MeshTime",
+            t_arr,
+            shape=[1],
+            start=[0],
+            count=[1 if comm.rank == 0 else 0],
+        )
+        adios_file.file.Put(time_var, t_arr)

         adios_file.file.PerformPuts()
         adios_file.file.EndStep()
@@ -134,6 +146,7 @@ def write_function(
     with ADIOSFile(
adios=adios, filename=filename, mode=mode, engine=engine, io_name=io_name
     ) as adios_file:
+        adios_file.file.BeginStep()
         # Add mesh permutations
         pvar = adios_file.io.DefineVariable(
             "CellPermutations",
diff --git a/tests/test_legacy_readers.py b/tests/test_legacy_readers.py
index dd41bac..d1befa8 100644
--- a/tests/test_legacy_readers.py
+++ b/tests/test_legacy_readers.py
@@ -149,7 +149,7 @@ def test_adios4dolfinx_legacy():
         pytest.skip(f"{path} does not exist")

     el = ("N1curl", 3)
-    mesh = read_mesh(path, comm, "BP4", dolfinx.mesh.GhostMode.shared_facet)
+    mesh = read_mesh(path, comm, "BP4", dolfinx.mesh.GhostMode.shared_facet, legacy=True)

     def f(x):
         values = np.zeros((2, x.shape[1]), dtype=np.float64)
diff --git a/tests/test_mesh_writer.py b/tests/test_mesh_writer.py
index a6f7232..e789ed1 100644
--- a/tests/test_mesh_writer.py
+++ b/tests/test_mesh_writer.py
@@ -1,5 +1,3 @@
-import time
-
 from mpi4py import MPI

 import dolfinx
@@ -8,6 +6,7 @@
 import ufl

 from adios4dolfinx import read_mesh, write_mesh
+from adios4dolfinx.adios2_helpers import adios2


 @pytest.mark.parametrize("encoder, suffix", [("BP4", ".bp"), ("HDF5", ".h5"), ("BP5", ".bp")])
@@ -16,31 +15,27 @@
 )
 @pytest.mark.parametrize("store_partition", [True, False])
 def test_mesh_read_writer(encoder, suffix, ghost_mode, tmp_path, store_partition):
-    N = 3
+    N = 7
     # Consistent tmp dir across processes
     fname = MPI.COMM_WORLD.bcast(tmp_path, root=0)
-    file = fname / f"adios_mesh_{encoder}"
-    xdmf_file = fname / "xdmf_mesh"
+    file = fname / f"adios_mesh_{encoder}_{store_partition}"
+    xdmf_file = fname / f"xdmf_mesh_{encoder}_{ghost_mode}_{store_partition}"
     mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, N, N, N, ghost_mode=ghost_mode)

-    start = time.perf_counter()
     write_mesh(file.with_suffix(suffix), mesh, encoder, store_partition_info=store_partition)
-    end = time.perf_counter()
-    print(f"Write ADIOS2 mesh: {end-start}")

-    mesh.comm.Barrier()
-    start = time.perf_counter()
     with dolfinx.io.XDMFFile(mesh.comm, xdmf_file.with_suffix(".xdmf"), "w") as xdmf:
         xdmf.write_mesh(mesh)
-    end = time.perf_counter()
-    print(f"Write XDMF mesh: {end-start}")

     mesh.comm.Barrier()
-    start = time.perf_counter()
-    mesh_adios = read_mesh(
-        file.with_suffix(suffix), MPI.COMM_WORLD, encoder, ghost_mode, store_partition
-    )
-
+    mesh_adios = read_mesh(
+        file.with_suffix(suffix),
+        MPI.COMM_WORLD,
+        engine=encoder,
+        ghost_mode=ghost_mode,
+        read_from_partition=store_partition,
+    )
+    mesh_adios.comm.Barrier()
     if store_partition:

         def compute_distance_matrix(points_A, points_B, tol=1e-12):
@@ -76,15 +71,10 @@
         )
         np.testing.assert_allclose(np.sum(ghost_distances, axis=1), 1)

-    end = time.perf_counter()
-    print(f"Read ADIOS2 mesh: {end-start}")
     mesh.comm.Barrier()

-    start = time.perf_counter()
     with dolfinx.io.XDMFFile(mesh.comm, xdmf_file.with_suffix(".xdmf"), "r") as xdmf:
         mesh_xdmf = xdmf.read_mesh(ghost_mode=ghost_mode)
-    end = time.perf_counter()
-    print(f"Read XDMF mesh: {end-start}")

     for i in range(mesh.topology.dim + 1):
         mesh.topology.create_entities(i)
@@ -111,3 +101,76 @@
         mesh_adios.comm.allreduce(c_adios, MPI.SUM),
         mesh.comm.allreduce(c_ref, MPI.SUM),
     )
+
+
+@pytest.mark.parametrize("encoder, suffix", [("BP4", ".bp"), ("BP5", ".bp")])
+@pytest.mark.parametrize(
+    "ghost_mode", [dolfinx.mesh.GhostMode.shared_facet, dolfinx.mesh.GhostMode.none]
+)
+@pytest.mark.parametrize("store_partition", [True, False])
+def test_timedep_mesh(encoder, suffix, ghost_mode, tmp_path,
store_partition):
+    # ("HDF5", ".h5") is currently unsupported here, it is unclear why
+    N = 13
+    # Consistent tmp dir across processes
+    fname = MPI.COMM_WORLD.bcast(tmp_path, root=0)
+    file = fname / f"adios_time_dep_mesh_{encoder}"
+    mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, N, N, N, ghost_mode=ghost_mode)
+
+    def u(x):
+        return np.asarray([x[0] + 0.1 * np.sin(x[1]), 0.2 * np.cos(x[1]), x[2]])
+
+    write_mesh(
+        file.with_suffix(suffix),
+        mesh,
+        encoder,
+        mode=adios2.Mode.Write,
+        time=0.0,
+        store_partition_info=store_partition,
+    )
+    delta_x = u(mesh.geometry.x.T).T
+    mesh.geometry.x[:] += delta_x
+    write_mesh(file.with_suffix(suffix), mesh, encoder, mode=adios2.Mode.Append, time=3.0)
+    mesh.geometry.x[:] -= delta_x
+
+    mesh_first = read_mesh(
+        file.with_suffix(suffix),
+        MPI.COMM_WORLD,
+        encoder,
+        ghost_mode,
+        time=0.0,
+        read_from_partition=store_partition,
+    )
+    mesh_first.comm.Barrier()
+
+    # Check that integration over different entities is consistent
+    measures = [ufl.ds, ufl.dx]
+    if ghost_mode == dolfinx.mesh.GhostMode.shared_facet:
+        measures.append(ufl.dS)
+    for measure in measures:
+        c_adios = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh_first)))
+        c_ref = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh)))
+        assert np.isclose(
+            mesh_first.comm.allreduce(c_adios, MPI.SUM),
+            mesh.comm.allreduce(c_ref, MPI.SUM),
+        )
+
+    mesh.geometry.x[:] += delta_x
+    mesh_second = read_mesh(
+        file.with_suffix(suffix),
+        MPI.COMM_WORLD,
+        encoder,
+        ghost_mode,
+        time=3.0,
+        read_from_partition=store_partition,
+    )
+    mesh_second.comm.Barrier()
+    measures = [ufl.ds, ufl.dx]
+    if ghost_mode == dolfinx.mesh.GhostMode.shared_facet:
+        measures.append(ufl.dS)
+    for measure in measures:
+        c_adios = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh_second)))
+        c_ref = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh)))
+        assert np.isclose(
+            mesh_second.comm.allreduce(c_adios, MPI.SUM),
+            mesh.comm.allreduce(c_ref, MPI.SUM),
+        )
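The time-dependent mesh API introduced in this patch can be exercised roughly as follows. This is a minimal sketch, not part of the patch series; the file name and time stamps are illustrative:

```python
from pathlib import Path

from mpi4py import MPI

import dolfinx

import adios4dolfinx
from adios4dolfinx.adios2_helpers import adios2

filename = Path("timedep_mesh.bp")
mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 5, 5)
# First write creates the file and stores the (constant) topology
adios4dolfinx.write_mesh(filename, mesh, engine="BP4", time=0.0)
# Deform the geometry and append it under a new time stamp
mesh.geometry.x[:, 0] *= 1.1
adios4dolfinx.write_mesh(filename, mesh, engine="BP4", mode=adios2.Mode.Append, time=1.0)
# Each time stamp can be read back independently, in any order
mesh_t1 = adios4dolfinx.read_mesh(filename, MPI.COMM_WORLD, engine="BP4", time=1.0)
```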
"docs/partitioned_mesh" + - file: "docs/time_dependent_mesh" + # - file: "docs/meshtags" + + # - caption: Writing and reading functions + # chapters: + # - file: "docs/writing_functions_checkpoint" + # - file: "docs/write_on_original_mesh" + - caption: Python API chapters: - file: "docs/api" diff --git a/docs/ipyparallel_intro.py b/docs/ipyparallel_intro.py new file mode 100644 index 0000000..c0f5b4c --- /dev/null +++ b/docs/ipyparallel_intro.py @@ -0,0 +1,25 @@ +# # Introduction to IPython parallel +# The following demos heavily rely on IPython-parallel to illustrate how checkpointing works when +# using multiple MPI processes. +# We illustrate what happens in parallel by launching three MPI processes +# using [ipyparallel](https://ipyparallel.readthedocs.io/en/latest/) + +import ipyparallel as ipp + + +def hello_mpi(): + # We define all imports inside the function as they have to be launched on the remote engines + from mpi4py import MPI + + print(f"Hello from rank {MPI.COMM_WORLD.rank}/{MPI.COMM_WORLD.size - 1}") + + +with ipp.Cluster(engines="mpi", n=3) as cluster: + # We send the query to run the function `hello_mpi` on all engines + query = cluster[:].apply_async(hello_mpi) + # We wait for all engines to finish + query.wait() + # We check that all engines exited successfully + assert query.successful(), query.error + # We print the output from each engine + print("".join(query.stdout)) diff --git a/docs/logo.png b/docs/logo.png index ca31bd61af5ff5e967b25acffca1ddf41f6ae60c..dd93cac64c5888b7bc69d43032daad9de08c925e 100644 GIT binary patch literal 15269 zcmZ|0XIxWF^e&o02)*|z0up-f(wo2wf&x;c2qpxiN$(_7>4Ko}m*B~t90J3(XcUVG)Xnk!Bv%tBX z#ef_>^V#b^o})uOv#)3Si2a`g^jX#2hS6zjP+&IL0?iq~LXIOZ5<0FWgdK@ODJxh^ zG$4%qxjONNX=u35(k#cF%j-`S0j>$V@rz;QEqkMWM~(ILE%zTidL6v=VZGqh*Gg{V zw_|6syh;*g~@|I z0!B16{^szgUfss0V(*(aeWvG-$j{BS3RbXeDxyRGeS(ExpP_o_I{stc=ksFqg5a`l z&`}6!X=(D|t~y2r@{Hf6pSzjSS_7x)HmJ8?X7K1*=O#f*WfKGJiu}tu_uw-Uf@NhKEPz+RR zQ9~-OSHjASv6A^`*W?>{zqQFbJD7!Wm6zc zi$LB>N8b&(?lzi0cXZRg(a3yF(5Xx+s+xX>7gx5CTgXnZ2hrHu<(-@ecYTkcOENjI zK3+3vXlo+0w$dTUX}dxdYsnYAcv;5aWVsRhPpSL!8aK=pKG4B!Blhqo%)xp?_y62Z z+DR zj9Y~$RTia#5Sw$-K;g+Ng zF&@!`SDmDS4ifLPY>Ins5HW#l|F>Vh68v5XdP8WbS_!_U#NBO{^|kqDn684Q9EuYw z3Jb}%NJNMHZ_5%1mO=XE0To*E@0gnlDL(!4VK*UQ+s?2?h3ux^g7E?BdjH3Vk5?Rz zI^D6d*jSvq5MCFL{@+HgIL!XMCcBjO<)xbfb>0%9{exHi=N~@kAQQFvBk(dLE864X zkSx}(kp0QO;p_hw9-uWF6!)u?bG!h`6UCxDKgYZA0M^GVMPY7DV9L&vSBm^Jfg@!5#gYd5bLWdAE|5^xt>8ktJz>1bCi{O=^ zp@jcW^#0!>#%NlAT9b0`OMZ>c5`yn=UP`#)0jGRpXyvCpN$Cp857b!~L7L^zQjilW ze(XjMOb;ti@q}7{lu^bGR)JJTd_nN_m#UGnNgXkly5~iYgdgyPHO#QZ? 
zo+hSqd}_Hw%2MnvkT5TC{h{Ix0S@;a?nscn-q6$Sd<0E;(J=zFtzGX2a$;jQmc=ll z$L)StN4&lR&priNsXQuq38@klXpj9 zuawYpvhN^omRVFRz^PzzRLIz$I>B?RR5|!6i(nn)?;>{vwi!kR*j#u$dUMR5gZ9dX znj*2oc;H0Eee5OxqaGw&!(>Tx$&7?>C3Q3_27elrtgqYTmGDrHM?D!URy~hU-IOYP zD_#%oeq>X8My$y$lFuuy#A(>syYAofx7*WVT70_KNQ}UCR0$a6H zbtjnDOw?wUJ8H;}JZ;CEfMjXenAXfe++q_ywtVBaIgD6%?$}^EN+W9Z8~bP>wSyY| z3`>p*L#x2Ti^SevGBUsn8`!;E4{yw22bT94TT#Dc}^n+|zc!0)DaV&`xQx%m5BZL6kAq0lF2%jtVhhidYXJZC8Oh z$j#Ze#-3|~GqkL(-fMZH=yFk!IW%E2{&>=&gV!bY=xedK^StL+RT!(+trnY3a;1pv z$dSy|2O_-foSEHX@z=I5=(18V?^fqF0)pF`y^qn1)LDrIzfOQD5aZO2H|jxZn8G{; z6+1v1SD05KIv*8LMD0Ucqaup#y0uOGI3+S}gsj7Zij&A57;e@xJa)dp@ znuD*te)}U$0av{b7}4fj%J<11t1L!33(1NquvTi*S0~u>r9U8C^Kc+6=3*>aMh0`!=F_7)!WXtxah9d}TW;0^xet^Ko^f_aT~* zJd2#DU)rSi#yg`sS5wLOFIYBd*M>e0w5Rt-O&%l<4gd&C1u(r%BW2st2pAI(0%JU}wPlLMQoe zG2hi!pAp?Bwo5wCtH8x9zxs2M_fr)iwI#0A?%#;iX1?M%uQ48JZ9D zmY%8}KOoZmn76oBY=deo7iOwR-EGxe)OyK2_}oq~q^95$zh!!7TXt*(FrMATT272l z+juTlE_=zmFJQLUCfI5FAXF-9H?N3BN%(Lsb0|mP==T+49~lSWl6v2OfzP=g$*{nk zDt4zcQL0!OI&6W_ZKuivwgDFz=iYWrbH&MVK+d2#d+Tv9q}|JiWx*XMO|AeQ`_>7~ zEM?ezu8P#e+#3ti+uDVpg-XnV!>(xda3+n3wy4-YWoZ0^x|%%gqF=DA7u~=&3q$+q z{Ll|`h9-jB+$QONH84J9vukxkmGRV`N(0fu8js`}1ApJK7x0y98E9n;ON-zi0BMv< zYglMNw$ktGt*VH>3&FX_=72)!6xdFM2IK-Ve=5Mzo+#2!8h55`?L@^EQ;c6@OEzxv zA;Ui9X6n8CV=qt~{mXD8r01inEQbQi;`0y{w)sCk_}Rj{F8cS`2>&2|ay#(uTjK+; z%tf(}e(Ss5>M6vphisIu+m`K8+?&o&XIk*ZC&Dg*?@ltp)@3n(yz2IHBRxK9E5xrG6E0B-f8*)o45(3Nj|QY{c36fzq3#3D?iAJsHYJ2dZR!bD9-lou9)w|gK{9H zTN?xa>yG5oOHPvr%XAULZ7@g5R=T(z>qF>37<()Na;7?fYJV}a&@)0jS-`P%8vX*2 zS{9?O=*h89i+7?B{dyPtiDhzk$`Qj6K(Wu6VGdn*p944j`RR2u9q>_6UE=e?`D~yQ zsu^?xZA;lOc8|FD$^*}$7ncH3Ix4^Q`VN9y(U(`;W#R;67wa$AOE6|oK0u>uJatN# zc}lkpqa?9$V>(~?v9U|l*NIPmiEFZ+iBLz1l&lw44g@KDb<0w$4X>s$x;WO)F#FIm z-sU|}7`(Pu3|!8p&3CPCR(+1kUHzu-`Y5CSOol2W&XsJS(l*|;BS38AXOf9|1#~Hi ziM4D}UGXC=+(^pDD10=%VcJm*ev=l-Uy@` zWx#nM`+5lzx%w^s857&x1LHc$4-rhiHr`nLrjvPe8;|;Iuxg@v7BN;HP)b*QV7mBR zAORidtRyjT+WPUirsCoL?TLrv> z;-9CYSotwcaHE-xmEa2cq)@p3Vf+9hG`&LUePL1^c=b&l?%o>x!E)c;MXi$q-#(WN z(&+?8If9}`j}~=w6T};Irwsoo|*jAGI${2`=8%;Xwmnl*9mu3-IZE#se~-#D%x#v^O~hHT89SZyZjJal`LccUxS zv=xBKw{-iZg?vI;QE7@~tn)0yvwC3*Ouw26oQ~rt`%xVG#4XBoN-{n}0)GxOqGZ8~ z!39*iT0`TvXr04Y;L35-=cMkpN&(!^W1+*ZT~A|{Uwrh{WQzsG48DN#h1Crf(Bl&P zI;6j=y&B^f3ITmomsb9dbg?W+nJJKc2|65^`1&h5?(+}ur^BLgJ5*16^1W-!sIinO zdx%Uz8@pC#FFQ;K*I8QP|jUKQ*|H0QsxF89c~Vc>23AcB!ec}=%4 z`j=$k{L=z;$GfH|AM@eub>@w(=d-;0+@ssjf@s6-At9lxv;~L)_5G;a zI|9IqwVUrI+s%&LKbNq-`q1(mQ! zm`^=JuV~3T0{%*x3)C=VM*4$AjAR?#71BT58vL z_`4AGVI~J~4HFW=1Q;BHTE?linUC$s6=#lb;qHC6HI+Q^z>3IlF0sg*3MF%IkTsa? 
zG@f~$zC$4RB|t{}Cx#AT8$CIbX`36nVR`RYY^fUxTj!|3_kp9@%l6d17)+qnp&!+x z!@6OBUQy07vV_KC?dPi$W?N}JxoX~J9%t;>xsJ$HL8RjEhRHg2VlH*=xhw2lv#+g` zVJ2sKo?9kOZB+0Xy(0cLYTz;OSj%vGL=)(XtU%od-(*L45>my==r`0u#9Utfvz2X* z0ekmv<#kBHr@%MBUC1ucF0lo8XRP>H+Gu|2XI;$c7Kx1cyeaH>>porc0|YP*D1SIv zc2y*w##Di3N2y_QZ@uCA;^U`FZIae57<_$+`dqv5tyVxOzKvwkJ&vS6zR3I{JRS@e zuH;-Kvx%=h&(=+?x_cs~uhK&ECR&UnK!-W;tNuD?CQS=&=Udf=$7DS+q*~!Z}CoUX-cPfYq8rXm=IwWyt>6*jV{(Do{;?jyeosV=+;j51$`nPVs=wI?6*I@ zaO01ULxfD=<{NpCxBvh$%JxOXP51sEt1 z{5a7#913#ARAAc6hTLCKVy97_Md|d=fvo8i*BNezhwh{6s*RDsrj0Qx{GRtlc{s~f zWj-njx22elc8GWQ5 zxcF&$jq(nz$Gch?>#(kS!}9|GYVk`q zYu@e7g9XM6eOJIwq<#`M#HSo*pFd9*p+Z#mZDdGR&X70Zkh$q+U57>`)L@6!ggMt@5_b3s_KC0o zP#%y=75*{U41NT%??%_@L-$Eqye_2&ZtuxVT=C8|PlF=B*YQdt&BdS91Y*Ju;B{p1 z2NCf~>s%T?O`2RL&h`LYsa&19%shKqR_ihgi2w59#nSFZv|6!#i~M3cw%C@|Vq*JO z55-=51C5uZ)9_JN>a9}G-A$W!32>P6A>Qp)?b9-C*M*Uf4N*tj*70kJm#YL^ztFAg zpoEvoW#i#3CYZmZ8N+fmkWkJ*%6)f7v^Sw{lg_~&zdnoh=nf+Z6fU=|xWkF{E3?}N z;i(AE1)E&Z>z2UF+rh(ofM=SX(I7VcbqC&hSLytHtMBaSW{mP$7ijgA~V zF!mrGvh;vmn^iF#O^pM4R6rs5^f8XTm`Dzk4cOCm;_;v2$=>?HTaEQ;D$>Tr_}+h( z%XCJ&Wp_ST8S0J9ig^y4->1oY^84kb{5YL6-`pQ+v)kj-6wa&ZU<8x!SC;z|Pl*3| z`c{1A96tc|Fn(~i174EYR)wLu7CSnKAq(VW?OBrrHt2&fNuU`b0%oFj%#CkHT4YVf zEWxyD7*07_eCk!M?FL0_jpTu9>R}5y@0``)oI+ob7$vc!p>ZUi&<_eYe>r&v`8cbS z%&2?v+lcVQ-SQ5Yi4THlP$ZT2@%kd^kPg~eNbTD@P;RjRI<;t3c%2ge!9%xE>wfp! zdDkf;q}4$J@xb%=X`9HpLixBQiKqsSWb~)VpY_&e50u%k7jT1z9p*w0$0i~X7F@hI zGZ!A;LP@r1s-t)zq$JycBo4h@B2{Hsv+&<1pvp7!JgEIRjW2So3W>+fpx%6>#x}{0^G``(K*-KF=h_b}D zBBSSFJ6LIW924Wk{O$n#HXZ!|N$=ho{|$3Y#`aLel1t;$X0r9zI>Jj zY=*N}K`@CHVc&UC#^my_iMHQ>%LepTz+;n^3;#1%H+fr@v{sxeH7?Ql zT-x+zPa_-wuT_4+Bxs$uwtj!mwh}#)(lZi$Z~RvRWRRCso?dqnCc!xEiSikPQnfrd zm#`*zvc7CNyhRiq8>xIN>_V$jWwa=GUWO>3J&5I$tVV44TE_jmwoP~j>8#K(Sul-qo}a>Mn4|(E|Q5nOUuqLNn>!|!pMU6<;zwGWXQ)f`7gJA|79%G z!WDAhTf)e=1d)9Dn{l+}RPk97H|1j5(l^!hk_|^eN~^cFB%Q$(R_5{DGOxh0KbR)O z5tH@rL@cR3#0U+!@`2ul{1bMdHmL6KbhVK*lkDAfx!P2dGj52*g=ClPb0&lHX5uNc zP(O(~$aMV6kb$erlb-0RJDg#LcSL6zrNb!kE$|}8{6yE*aETNWiSmfo0O7F_oCx!Z_T6y4QIW`Xgf!WKWpES_76-{P`>Mo_>1eU1 z2O3U*Z$koPo&NZg=UR32y*V1`pTTdL} zx6dfIFtbvn;Ht>%4|8*Euo|%6bt-U_dht%i>(BDb9TfSgb^3*%sXsvb`N}*$+ToJ7 zH6DObNg!nK$QL?Tyum7xsx7%>CgUJaS-*W~h$f5V@M7C{1uY&4F^PEU_>`6QcVL&w zL{_L|;FDpd%JPv>h{1xAr_TKwOKyCj$?i*>PeaD9+W3)}#zzliE9kV6jT|+ewr#sx zaYtlnY50{JUL|DAq(BiMah+!mhbSgHV(r3@Zw&;NxS-RNd^3HC_t|7<*2Ee@5AWi6 z9Nc}`>u*8Fvu+Q@XUSJbUU>^{C25T;E@_+-vr|nW9B5mP*bMRz3X&B;V8#32B1gp@ z)Rbze_2f?$c3b;v#gBneS}5M4J%}yyFGa!*e`*U{*28bqMJOlC2nV;(zOKwJq)S#d z?eP%In{#!#PAv#)VhD;(8VzCnEpAc#AJPZAR;e-Hz~-jK&mRYMi*o)dF<6F^G#2mr zgEW&Ge^FYtz+ANu>Tu{OEhL)Da!AQ{m}O;$+M+q3SnLZg(~ANUc5P|$g%8pNOq$LJ z^(21et?|5$YH!Uwq{Y$P7;3UwXm)yvM%B`TKPt*HVFqn+M!~=QUX>$QE61vZ!oi~M z57K`I&U_C|-fvGg-ZB zU!ps?|7Jg*^YT z%2)cKB9gwQr1wtIXHa1jh|c3KF@TOx+rUYoG%x_w|5(%`#-Gi*=bK zwuCL0K$ouX%qppw)48V%c`spW=>|99cl{x2fl^&VA*0GrX2v<5`fyNlx{c8czHY*Tw#n_5XhllaIHDmn z@Hve%dzzitr{AWUHIPXrZZeK&;)Uef;HVu<7@J6M9lH%m7`G)zuPLukbD5yns+0O1 z*Y5@_&kkjnR9%kusQ2W}BUu0wi#}1DWE9bw2?`3ptW@PS6q>h>*Cc()g{z})eFnl) zPjm)#xMM)8v!6USo9(0rYf)!ZcxNp1!^`swAdB%y=%~0(3F|YHwKD`>&Vi>q#vkVv z#D1?!`%g()dL7y?`n5{Y=F2fT*+tA28+6NH>66&n^+#vDW z+k9tM5_f{Pi3<`y>rm6CGmE0tz*k){QR{j2EFaS>3|^b{cgN*g*(dP9?tpqRmOBva zfr-UzY}u%TVwP+$$EiL3K|#yjM~=B5dpeb@;?I5`A=j}6@=Yk|k;d`?)jel$XNS1z zP~{dsXmt#x7@)LoVCEqy=*wPV6d^qr^r)En=l8%b=Uq8;DkjVXDsnkXsN-@+f8$?DJ@mX z1%lwvVc|4=Rr*swFMcnvMISDk+{2KS z9?)7Nto~A(Ln8Z&woFCOeN+X`+|lPcjw)yP{NuoGpWC|@rfSK0;61)!U7$db<&XE+ zXCIP}#2-Atz*&L@QTGcSE<^*3J07rr3!;fl?7){Klw{k!F0od_-keQV$%1Ko$|P0q zLYX8KeJ@>u9vQ5$oY37)1# z!oktv-so17yV_QQ)f)qGtpCt4H247d#6n4YN#~WCJwd=HD~#jS&m8bzEc^bUXBTDp 
zt-G{kRgex)C{du)A$z&hSDdee8qh9o(3XH)mKzkwedekf^MdUAbHLtz!RXzWBW#zU z!#23GAw{ewuFqHabrfo^uwv*X9vwjdLCp~i`&{^5?2|yq=mg7OGu~R%XY!o5QmqS; zeXkAqM{s|jDs>w(rwPGfLqk$RFHJp&R7oEb{YSkh_xl6i!C(Hmq2H)MLQ!s+h$$xg zmH9i;Q;vPrqN7F5fBX>moTbc@FM9hkAI*d|`KIEn*Yw8lozumNx!Ez{_2xyary_Zh z#a#cxC4uX2x2e#b66O7IG~4hmnJ~cbVYyN6JZvU^8SUwSK2R^{Ky(K!0mx<%M@X4D z5kPD4k%+IrdIEHN-aRtyhoo5!Lc&#F?I8cYh$w8`^wfOFV9$0{`7>6@@3BVwS&oLV z=_1KOip85Ce(OdLHOGp{S=2V#;z20!{#|bZa9=_$ZvD#dJ)!Df)vLjY+({|HuD~qX zz=2H2V&_QNK1^>N(O}RY5g!HxPbgOsS9WoXys?H4>8MyhNI5PM9Z)3vkxg|*YRd)7f^;0=diXIF=lo4*#Pe{sc`AN(j3wQ`Z&Mrq?A*>*B$vCG`q%+HOJbc zZPC2Ag-E^PT<)IOIT-7MRM+eq(8845`|iZdoZQJ+QiZMz^RK2sTVsXn4+xkoC6-Ml z%UyLJuuoK@6zpbnZ7P*TYU^tK2VAAz6vW$79>@V1l}0lI#EGZ?$G}peMT1Q_&Oo)f zGdTqZIq3?!T1=PEKH|UadSzfB`(!iDQh+U~nI`jvn7YQFVpvog5c$6EQ=?3;n8rr~ z@~8O$nDcoH{4{K30mMPV!K#A5R7fp{Z~23S`aj9}YW-t~7^tc*4+t==|EK=jG>Lk# zlxI%&M7>SVG-yW)Nf*bDoWmQnt7zm!t-Lc^_#&9@v8Ud>I0}+4ByH`*IGjn~gRmV~ zGY9X(GV@ocN5vW$`LV}l2t<~5G0KI&u88_8m4P3~^DBE3$T~o`oN6G%gdsy4F|aCU zmS8^Rm=O)?Lkwiqxxd|!arCx3Y&R{t7fx+fJw_K6MpGVAUF`2+-Yye-LmlJsfv+VpwJth6!J0g6 zMT%Xd_eiSyo`8`0a!Pf^Zc2w5+i***&EeiPj6CdC9!F@PZu4-*SV*wczX40w;5dIs zVVy#ez1BVAZnWW11n(7*(bvdp08WD+0RjqJesQ@fs;3M(s?WSvHIP?7xL$o<0fzJz zn_@yGyzU+Dag}N1Y04?h#GAHqisV~at8nf&R+LNyNR>x4W+ZlV_7=6yx!}+};bD}g z%4B%~-l)sR=By&j-wlbC-iF!5>H6D64Y2v^SgnI`+w6Lvko2X`v;vrIZ&?Zl0^&50 zVzE#55{1-7uSIBiM)UL)9;Cld8B9^adzn4snzO(^!rr9ct5ih5s!JnR->FLB{(Sgd zt<~IKTIllIEh-A9ts&w zrBJxHo0>_&~2lc^x-OOCenzEJvtpS7(XOzviL}B*vDpN z?=$5o)qr(qQcZCc`#pQk#$2nV+cJIPW;J)&lQmq}x9A}fAcfoK({)c+emHj_bppE3 zTPec>B^(;&oTdVnFjV!Oe{z%;{qYyb2YYF{g&HCRvJFG9gEcu>mb6bwLZc6%Zn{wC z#4@+8*@8xLJnR9lejsY;<08BZfe93TE0t&javirZATRvdG+qQ6u8EFs7e?ORe9Y4~ zMA7ihqc2AP_;#z`pt!p1U>2uew2D9gs5$M5e@Ub9Tw@^4FxSchGy?4)3uO(|xrR=1 zNeEO=c<^q&yU4XG92hEJdhq4Vr+m~Ep>(E^11Lz5%FN=VL}rznOchBb4JCA^%~E7J zc{&K~^W!xqXpQ2pBizu-RR*=FAn^V@Y^mFOrRrOR{McP!8{ZO)*OrpPs`&5T&6`o2 zlqycCkv6N+4e+V6i#Y7pEzo@j;a|8|1&sXGZ9Ba?)FyQwDr#Vk$gCuJG4 z79}TbSe#k*mSWROs^+opc7Y-&PSSi>AAj5aEi*CZX%m7XP%BtT`f`&^M0 zQ9<-EAIDRH9eQq3yKE#m%BYxVWd$V?8)_tCDt|J%^Fo$9koM_Xfv?m+Y(C|L#TRbR zC?;jq6Mo??@5(Vn29g_;rMov!5g*7P!EJ^QDmSHC6eBU0Z-XqZ$q?+J@`Z}LL*UJd zTvWdY8YeWm-1edp@|0mOqka1B7fg`Whszl2CvwgyBES|V}O|Cq!Y66kwU&*&tSe0gJ$Y}QEvIpL7OTC6|I zzFdZ>Y9Qd_E3mQp0S!0PRi`z0E!ZIy^|6zfP!!Ixn;-j>fOY%c`z?LV*M z-nLYrk3W7((fc?KGvqB1Mm$)XJWtfdTqUaNl=kt&QHJ`DVLW7r^%@Y6&>p8B{W=RN z_+&juPWtyS6@ka8OF$_L6o#m^m(w3d;Z+_E(GWe5RhgFN3IRuIb;ZvSQ6*a~|3Qtq9#BYuU$ zET4106|d2E`D+DXu{c$9Ekqd$+|xZ^nbv}jYQ#_HB^ih>MS)jc$Ba+wiowNtSwAZ{ zyqH>SxF4rFDAv-Z5{JK#D)~A%6T4!94JD1=TphnIyfYVYd!Oe^^+Y{M;N4$xlG4I= z{AoKG0+oD29DBg_L`(V&pDg%C)Z(WV07mt}T&pVU*6&xBDj}M(Bx1X$bkVq&B@zqo*JazVhVQ6lkbI2C zJ)Za##|Rl+FdAL$+%b;`aKR1VT>m(ojDRZcC4r9^Q}&eqohUNI zFJ?&>fe4#XKXtR=SwWNIY)3qS&GbgyiW5F2gfxw?RvZC+D}o+x$a});NrpvL6&=|9 z_4NDo(2N9NBC_{k5CpX9Ssa9WSj;ok_Y&KRx{ndwHi<3#5SjwwB3=VE0}fC-{Z#Mi zxcy(+FhPqH-!G%LK5B)~{;85`wpryO$i9X<#in>FrkK!+g6}@QHf|#mwTFbQr<`5Y|ke|cixD9MQT23=GJ^P z47uZir-qf&%W4w*M{D&{ThfrfTf%f zClm^?5zMK|#m4J$V3`pTJ<^XlxhWI!Diyepribw30C z{cgq+_F5t#U({1yF03mplmnrH=o%^FcCVtmaDI)g{EZnKJ{V_PkG{! zUfbGv-^TKi)Zkg?aQQQoO&7W@7e2yUMs*7R2Zz%-K1&@UFMv1Qhe+;-BRBQ|T=k16{DrCjZQAYzX&bZ2iYztNRULd(TC{y;hpe3D zDsyv@+Z2WGJXEdgq&~gn#rk8Nyy<#VyV=Nte33@F9{pl5j~Fs|5TCUoTOn6_b@dz{ z%#cw!DZ%h-H?gPQ9nw@NNvv$Lc%<|uA5{?Fley}QV%3NxYw$VkZpP2Le~U=TM6(d7X{%VyU{2ol(NY z2ubB+n5~AH`|JLX2w`!12WBcQ?yDu`$WtD6gx_^L7cl)YSXbD4$WKVh=_y+jm?xz! 
zT-7+QW3~peqv{6`7UpZk5V|SC5;)KZR?}!c_$~vu~nu}j^hx!`+4JF1AI1J3MV zLvTgR9gFl+J6OmmUaDRwU19w(<)x}`{)63M-+<>(E}C3;0C*YN6z%cis0wfHQgjf^ zVI+KFE+bHCz7&OhUlaj!&K#}`{&rq(gq=3I^46p9=~ zg4k?di82lryS9rwROx!?2!2*aWu)RiZ;|Pyxc8T`S&~VXxR)^<=D#Q>~ z!*T?Lr7jj&brt^O8?=>+P+*c{L)M5*u$O5jnh^g0IYLwZiGtL2c(Cu=;(I4{&2k4( zsl{lAc`f-K1L0cO!bj6wG#Ig|{>m_vj1#Is2{RB+gNFl7N%om!s;tti(}ilmpGY%5 zhh}daJE7e@5EhekafGkxF z12Hb*%yDCm%KS0W=z3fwYrs#JHDXuN@=;}WBa0=fkvsWTI|)Y-zfFwR`C)<8kO06s z#s+5_zw^ZW4z0uuD6?hCHTSoYnZGa4R9BJm;u`Z?jqLG{YF+x0PFB(EFtx^PQkF6! zq)tb3(X1}hT0~mx>#u~v15-t+f9qkNO{(MyH@_{#@5)#w-^+M<3E2TZdT8@};(M9u z8N0g8M$b1{iX+m$++KvG_OjU;y;pc^Uj*-m?ZW2ZJmqxcfG2=QMLP|WH(+Q#0+FuJ z>88O?dZW`^Zm;cE1pz|QwUmPWKB6HW9i)%S6 zG@Gl4rZ9=9W(@j(D&v{uPicEJQ?xSFh<~1|B@A9e22WomT~H?j!(kc-MZs(0V&Qkr zV=~AH(6L_b+AH)TdaO`hJw^n$tJU>~6FYuN{k`d7PHu_O?ZHbKw#$S28KDYPEQ~G5 z;0CHsoTor9-df;aRe_17Zyj&!KkxG3kILK*01lQL{#0WY4AfzqkpB6fdn)b*Tq3>Q z9)hUqH=bHxKmgd7Csk;J__DJi11cpA-+>*!bkKHz7=1M2e ztt@_DurC}DrwTc739CzyG4PWkqxaKLFSl|Va@AlqofYnG9*m8(v(%Y>Yj5Am* zi5{nBy$`1eEN|XPcYIw&zpM@y_%d1uq+EUTsF6Ms5|i_}e?@%7T<-8|P|sxznx=)yDydQUAyRkBsGkY6 z_}TI(GVEtx$TrhdtIaC>au$5~t%J&%_?oVPd-dcb3jL>WelD&G8|oW(#YR_as^(O* z17lzO4}NmyEab0~zCcq8WT)0iT?WzP;;tPT)JKUB04L#3ro@h5A!#erT0sYCAj>5F zkR(=$hC@JwybZiKSNY3W|1fyf?%YRGh_w9Ue#k$>4idQNeIeLlQrW^uM3O|FrkZ+Q z_Y6Gn7BmeB^!)JqK3PVyKk&Y8s(o?brXThRR#pAshMZoDN$4Rr!-9S4R!zXMGkD?V zQSl$U%nsBRNmhqAZp>W+IOJ&7r^+^?9K6kXrOduVvSJD~eS}V)icxA8lX-u*QuZ~3 zBiU}`Cc?*aPdZAj*oJ@7mAss}VcaE|%YrQ*6;#On(jPLy&u&6|az{pP?N)}1Dp4R; z4f*1F>Vr+q*&BZb>_VbyD-^AVn+Sl+s}K#|>l#474;gfJho7>#98WUO(*MCJv@lg#nnYrE9QUyCNV zyO5oeS7Qpbp~F7REj4aQ*PZCyUk=cLr=Hpu*HR4fD)B!{L)#{_WI4cuGes-sBn85y zY^s6M_0|vi58_5?ndSf|C&9r}it=MBj4KERj`de@S7K3Rr7QRwFz=haks-I*f2Udb zeRV8-pFZ)0D>-??2^WBrgp`!1gsiBPjG3gQl7xhkq=YcxTS6i&uxIuE>frAA)CCdr z|KH&Uf4CK)1M5Ex4?PjS0Z+W)0F9@fj__Oh?oXWI58zLp20ibG|2sPi(AP26uGVyj F{9i<9hr$2= literal 3630 zcmV+}4$<+6P)EX>4Tx04R}tkv&MmKpe$iTcs*34t5ZA$WX<>f~bh2R-p(LLaorMgUO{|(4-+r zad8w}3l4rPRvlcNb#-tR1i=pwM<*vm7b)?7NufoI2gm(*ckglc4)E8@Of@^k0ade% zR3a{9va3Sy72OCSj8P0q%+%*ZF$vG{bq^n3?_xa5``n)+tmI4v_ypovrW+RV2J!T! zrE}gVjy{D4^000SaNLh0L z01sgR01sgSs6VG^00007bV*G`2j&S34JarpKN|G_01PTgL_t(|+U;F=R8`d)|DEv; z5X!8AVj>_aibIZIW;`_|Q*$h9nWdQ|=1W^z_Ou+H`dVtKm1{kRSFS>PZ@sj9=1^)% z;eb+z;(!pQpaL4)`AqMRYZ%^h&OI0J#lZXh$=>(ueZKFvzxiwkNs=Vo2a+nU!Jf4i z{D}jQY*tWhJAi$98t9$_;isle#QgxBc=;mCZ;XR<^M=cqZpwV<#?A0c#+wJ+Tmx`+ zc>pvE0SK~*gHDoQo;e)C#e9VwkqCHyC#d+YezbUrEN?vvKKFasvB1sQAX~O+(EX4N z%>beBq%zZR>DSpZLLFZl4zq%B3pTD>QE;tOvMVmkbhx;Yd zgo(Z%ixN5mjEgpb&&vjX;s9W`foz)q=CM&AjDdcs_(A8x#gqGkPVVoQg5SLQ$xnXJ z`RRc(ubcxyaUq1F(~y3v1S}?J$|E2MWJEN`*0G@4CIA%ur+P8*$|VTre}-6I3Rukm z!-5D30TtT`REO>Wt!=+z=>-Ty1rV?Q3YcpEiUtuB3?eiVbeFy$nuk4*z>&-~5X(v+ z)!cIOm!trl9z;+G$mn(;bOz5s$LAdZ_e~b~qx&IQ&FX88nuBBaXK?J?0`c-i<^381!KObC zcI+!49%}B&P*XJ=U;PW*-Yke^r7AB@fJ#V)Zek|r-f8MWx>W)5?5AYMsQ6?U=C9W* zaMKGZ09;GvWOx*eE4TXwWt;23Qsp%bFD^kSJO#_b7r;C^0){!O zm8&VDsQ3;m! 
z9Areat53)0Wy74A?h)Wz7;@t(Z0p{FIWryNmEs0b^}@w`m|q{I32=3IQ+W-Rxnm)g zUH|~tx2%Na?dP=t?rueJXeTU7rz?P=krL;Cb;V3rSIpE7I614CdkE&tCuQ4a$I3F+o_@TZPIs=n!Y zJ}Dv!R8k)h!C`>K1mSWKgww|WNmM=rH!Hv&+Yh?m(8h~6frowTD&;sKv;}lyUIyKB zAjs%;PCi>qkSfZ+|NJ8yS?eHHl*wD{17A8p$N#uTaRGMB%h12%8fj9YEkGqCgBdab zQq^_Xx2}R?&o)4Co^t(^`OaXXA@Me76!g>I0TC2@m)+xF|KelVKY33TFttn#3Wh#o zDVPE2E*&vq`DIv_XFw=CrEF#kjyG!1@p;*hZdJ%f+OeHrT(J#AU^ADD0MRB6Y@0YR z{f5GnF$B_0*JlWq&N$7hSbkX1VBU z0h}Bj*ufJNeMJ%gq#6~|W2)A?St_Xy0ypf3?uB^`$|NGR1@zMwxt*sQpW!x%&5oT8 zD!z;QdV$TrJ~>$#bXP7l?ik`$rLw!>hBviyTg51k0RYh~RB?iJ?R-W1EGv-aVQ!l= zy-n<}XF=$V?)TO#6jVa8%Bi8jj+m@zO}f`0WuaM2jT&@Rr$@jHod_mn80h2_Q0uKhvx9Rj-ZBhJM&Ml20Ma=m$q42_9b z44SCK?jV9fv_0o{9*BZW2y1K9pkva;s5aUv9W=4@yt8C_HW&QiUFx8tQ-%Q>m&r2_ z0C*mJ?ji8GhiaWhszWzW9eaXG>;WpV$NlSrqZv>!@!B=9oX__=klJv$jxRTAMm2x(F!+;)Yt0hX zu_u_`kAv^EyI)4ut$1VB<2--3LH7Wj?6bPQC}724C*(=(Ij# zO7oTL7fqlXCJNy8ZinrY_aIeW_u6Cc)LvOVjEgpd9X<8lt{IRdXRU2kIvhJT`JOSP zhkIppDk$xPycUvI(X@5iD$NRvfjq&*8-|Yw5K3WKF z-xp2Sx$#z%E{Yx)}D*A=%762cG?(OCXkVdR=}ZRoygYhp?hW)=zc@$Z3#UJuLxtH%NJZi zQ31F;SHC#8ECf z->q7`dn2)(pr5fAy65L8yDVOIL>nbBtfGmPs;)!QSZ?Hx?)PoLdb^=jHbXT2Rc(NS zdTF%|7aU)I0@I8kaQu6N=PFa>`&B}3@PiJ{Z%Gs*{u2k?1EQi#HPi1E^J-DdcRO(`AQhi|J^EojT) zX^MV=a4rw#*Pa5^J_&T6G*Io6K!mmcp*KLfRSEINRq#3cz~$_9?HvC7C7OqU?v>_8 z9Gq96aeKF``fD}M^#a|c4~Xz6K;R+%S_I)-UW0w`LwD(`47!6mKt@JGH|ejkiE**4 z6t)eEz~}69lWj>G1OE5{`VwNcSA*tijj!aD_xTyR_Y< zOvspc1g!lU`po6`bj3Rv))I!-SHUoMjb|-K4~)7aVO+QgOzLpYfY3b^0jvJoV0WkJ zr@rm<-8^)SsDxw$tl0swwQ|r@_lYD0cG$CEhfV;0>^pEr{s;cpx9&gRqiN7x`-6FG zB$}NR-Vbcd8qQUJnW~V+Qh3=8dNkqtApXqkHF;Y zgZ;BL5H2YFPYnW8Vh^yRr-JGKM6K1J;*w;|tJ3upWXl*ueiJOKxVCQ6Y|#CO!m({V zxa{2k=TPQra5&hJFG4qZD!{N1Ln1+UOO>7bf8~*#NqiUNF#rGn07*qoM6N<$f{1z1 AC;$Ke diff --git a/docs/partitioned_mesh.py b/docs/partitioned_mesh.py new file mode 100644 index 0000000..8ab2751 --- /dev/null +++ b/docs/partitioned_mesh.py @@ -0,0 +1,98 @@ +# # Storing mesh partition +# This data is re-ordered when reading in a mesh, as the mesh is partitioned. +# This means that when storing the mesh to disk from DOLFINx, the geometry and +# connectivity arrays are re-ordered. +# If we want to avoid to re-partition the mesh every time you run a simulation +# (on a fixed number of processes), one can store the partitioning of the mesh +# in the checkpoint. 
+
+from pathlib import Path
+
+import ipyparallel as ipp
+
+
+def write_partitioned_mesh(filename: Path):
+    import subprocess
+
+    from mpi4py import MPI
+
+    import dolfinx
+
+    import adios4dolfinx
+
+    # Create a simple unit square mesh
+    mesh = dolfinx.mesh.create_unit_square(
+        MPI.COMM_WORLD,
+        10,
+        10,
+        cell_type=dolfinx.mesh.CellType.quadrilateral,
+        ghost_mode=dolfinx.mesh.GhostMode.shared_facet,
+    )
+
+    # Write mesh checkpoint
+    adios4dolfinx.write_mesh(filename, mesh, engine="BP4", store_partition_info=True)
+    # Inspect checkpoint on rank 0 with `bpls`
+    if mesh.comm.rank == 0:
+        output = subprocess.run(["bpls", "-a", "-l", filename], capture_output=True)
+        print(output.stdout.decode("utf-8"))
+
+
+# We inspect the partitioned mesh
+
+mesh_file = Path("partitioned_mesh.bp")
+n = 3
+
+# + tags=["hide-output"]
+with ipp.Cluster(engines="mpi", n=n) as cluster:
+    query = cluster[:].apply_async(write_partitioned_mesh, mesh_file)
+    query.wait()
+    assert query.successful(), query.error
+    print("".join(query.stdout))
+
+# -
+# # Reading a partitioned mesh
+
+# If we try to read the mesh in on a different number of processes, we will get an error
+
+
+def read_partitioned_mesh(filename: Path, read_from_partition: bool = True):
+    from mpi4py import MPI
+
+    import adios4dolfinx
+
+    prefix = f"{MPI.COMM_WORLD.rank + 1}/{MPI.COMM_WORLD.size}: "
+    try:
+        mesh = adios4dolfinx.read_mesh(
+            filename, comm=MPI.COMM_WORLD, engine="BP4", read_from_partition=read_from_partition
+        )
+        print(f"{prefix} Mesh: {mesh.name} read successfully with {read_from_partition=}")
+    except ValueError as e:
+        print(f"{prefix} Caught exception: ", e)
+
+
+with ipp.Cluster(engines="mpi", n=n + 1) as cluster:
+    # Read mesh from file with different number of processes
+    query = cluster[:].apply_async(read_partitioned_mesh, mesh_file)
+    query.wait()
+    assert query.successful()
+    print("".join(query.stdout))
+
+# Read mesh from file with different number of processes (not using partitioning information).
+
+# + tags=["hide-output"]
+with ipp.Cluster(engines="mpi", n=n + 1) as cluster:
+    query = cluster[:].apply_async(read_partitioned_mesh, mesh_file, False)
+    query.wait()
+    assert query.successful()
+    print("".join(query.stdout))
+
+# -
+# Read mesh from file with the same number of processes as it was written with,
+# re-using partitioning information.
+
+# + tags=["hide-output"]
+with ipp.Cluster(engines="mpi", n=n) as cluster:
+    query = cluster[:].apply_async(read_partitioned_mesh, mesh_file, True)
+    query.wait()
+    assert query.successful()
+    print("".join(query.stdout))
diff --git a/docs/time_dependent_mesh.py b/docs/time_dependent_mesh.py
new file mode 100644
index 0000000..e6e22bd
--- /dev/null
+++ b/docs/time_dependent_mesh.py
@@ -0,0 +1,89 @@
+# # Time-dependent mesh checkpoints
+# As we have seen in the previous examples, we store information about the connectivity,
+# the coordinates of the mesh nodes,
+# as well as a reference element. Note that the only thing that can change for a mesh
+# during a simulation is the coordinates of the mesh nodes.
+# In the following example, we will demonstrate how to write a time-dependent mesh
+# checkpoint to disk.
+
+# First, we create a simple function to compute the volume of a mesh
+from pathlib import Path
+
+from mpi4py import MPI
+
+import ipyparallel as ipp
+
+import adios4dolfinx
+
+
+def compute_volume(mesh, time_stamp):
+    from mpi4py import MPI
+
+    import dolfinx
+    import ufl
+
+    # Compute the volume of the mesh
+    vol_form = dolfinx.fem.form(1 * ufl.dx(domain=mesh))
+    vol_local = dolfinx.fem.assemble_scalar(vol_form)
+    vol_glob = mesh.comm.allreduce(vol_local, op=MPI.SUM)
+    if mesh.comm.rank == 0:
+        print(f"{mesh.comm.rank+1}/{mesh.comm.size} Time: {time_stamp} Mesh Volume: {vol_glob}")
+
+
+def write_meshes(filename: Path):
+    from mpi4py import MPI
+
+    import dolfinx
+    import numpy as np
+
+    import adios4dolfinx
+
+    # Create a unit cube
+    mesh = dolfinx.mesh.create_unit_cube(
+        MPI.COMM_WORLD,
+        3,
+        6,
+        5,
+        cell_type=dolfinx.mesh.CellType.hexahedron,
+        ghost_mode=dolfinx.mesh.GhostMode.shared_facet,
+    )
+
+    # Write mesh to file, associated with time stamp 1.5
+    adios4dolfinx.write_mesh(filename, mesh, engine="BP4", time=1.5)
+    compute_volume(mesh, 1.5)
+    mesh.geometry.x[:, 0] += 0.1 * mesh.geometry.x[:, 0]
+    mesh.geometry.x[:, 1] += 0.3 * mesh.geometry.x[:, 1] * np.sin(mesh.geometry.x[:, 2])
+    compute_volume(mesh, 3.3)
+    # Write mesh to file, associated with time stamp 3.3
+    # Note that we set the mode to append, as we have already created the file
+    # and we do not want to overwrite the existing data
+    adios4dolfinx.write_mesh(
+        filename, mesh, engine="BP4", time=3.3, mode=adios4dolfinx.adios2_helpers.adios2.Mode.Append
+    )
+
+
+# We write the sequence of meshes to file
+mesh_file = Path("timedep_mesh.bp")
+n = 3
+
+with ipp.Cluster(engines="mpi", n=n) as cluster:
+    # Write mesh to file
+    cluster[:].push({"compute_volume": compute_volume})
+    query = cluster[:].apply_async(write_meshes, mesh_file)
+    query.wait()
+    assert query.successful(), query.error
+    print("".join(query.stdout))
+
+# # Reading a time dependent mesh
+# The only thing we need to do to read the mesh is to send in the associated time stamp.
+
+second_mesh = adios4dolfinx.read_mesh(mesh_file, comm=MPI.COMM_WORLD, engine="BP4", time=3.3)
+compute_volume(second_mesh, 3.3)
+
+first_mesh = adios4dolfinx.read_mesh(mesh_file, comm=MPI.COMM_WORLD, engine="BP4", time=1.5)
+compute_volume(first_mesh, 1.5)
+
+# We observe that the volume of the mesh has changed, as we have perturbed the mesh
+# between the two time stamps.
+# We also note that we can read the meshes in on a different number of processes than
+# we wrote them with and in a different order (as long as the time stamps are correct).
diff --git a/docs/writing_mesh_checkpoint.py b/docs/writing_mesh_checkpoint.py
new file mode 100644
index 0000000..c33da07
--- /dev/null
+++ b/docs/writing_mesh_checkpoint.py
@@ -0,0 +1,148 @@
+# # Writing a mesh checkpoint
+#
+# In this example, we will demonstrate how to write a mesh checkpoint to disk.
+#
+# We start by creating a simple unit-square mesh.
+
+from pathlib import Path
+
+from mpi4py import MPI
+
+import dolfinx
+import ipyparallel as ipp
+
+mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 10, 10)
+
+# Note that when a mesh is created in DOLFINx, we send in an MPI communicator.
+# The communicator is used to partition (distribute) the mesh across the available processes.
+# This means that each process only has access to a subset of the cells and nodes of the mesh.
+# We can inspect these with the following commands:
+
+
+def print_mesh_info(mesh: dolfinx.mesh.Mesh):
+    cell_map = mesh.topology.index_map(mesh.topology.dim)
+    node_map = mesh.geometry.index_map()
+    print(
+        f"Rank {mesh.comm.rank}: number of owned cells {cell_map.size_local}",
+        f", number of ghosted cells {cell_map.num_ghosts}\n",
+        f"Number of owned nodes {node_map.size_local}",
+        f", number of ghosted nodes {node_map.num_ghosts}",
+    )
+
+
+print_mesh_info(mesh)
+
+# ## Create a distributed mesh
+# Next, we can use IPython parallel to inspect a partitioned mesh.
+# We create a convenience function for creating a mesh that shares cells on the boundary
+# between two processes if `ghosted=True`.
+
+
+def create_distributed_mesh(ghosted: bool, N: int = 10):
+    """
+    Create a distributed mesh with N x N cells. Share cells on process boundaries
+    if ghosted is set to True
+    """
+    from mpi4py import MPI
+
+    import dolfinx
+
+    ghost_mode = dolfinx.mesh.GhostMode.shared_facet if ghosted else dolfinx.mesh.GhostMode.none
+    mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, N, N, ghost_mode=ghost_mode)
+    print(f"{ghost_mode=}")
+    print_mesh_info(mesh)
+
+
+# Next we start up a new cluster with three engines.
+# As we defined `print_mesh_info` locally on this process, we need to push it to all engines.
+
+# + tags=["hide-output"]
+with ipp.Cluster(engines="mpi", n=3) as cluster:
+    # Push print_mesh_info to all engines
+    cluster[:].push({"print_mesh_info": print_mesh_info})
+
+    # Create mesh with ghosted cells
+    query_true = cluster[:].apply_async(create_distributed_mesh, True)
+    query_true.wait()
+    assert query_true.successful(), query_true.error
+    print("".join(query_true.stdout))
+    # Create mesh without ghosted cells
+    query_false = cluster[:].apply_async(create_distributed_mesh, False)
+    query_false.wait()
+    assert query_false.successful(), query_false.error
+    print("".join(query_false.stdout))
+
+# -
+# ## Writing a mesh checkpoint
+# The input data to a mesh is:
+# - A geometry: the set of points in R^D that are part of each cell
+# - A two-dimensional connectivity array: A list that indicates which nodes of the geometry
+#   are part of each cell
+# - A reference element: Used for pushing data back and forth from the reference element and
+#   for computing Jacobians
+# We now use adios4dolfinx to write a mesh to file.
+
+
+def write_mesh(filename: Path):
+    import subprocess
+
+    from mpi4py import MPI
+
+    import dolfinx
+
+    import adios4dolfinx
+
+    # Create a simple unit square mesh
+    mesh = dolfinx.mesh.create_unit_square(
+        MPI.COMM_WORLD, 10, 10, cell_type=dolfinx.mesh.CellType.quadrilateral
+    )
+
+    # Write mesh checkpoint
+    adios4dolfinx.write_mesh(filename, mesh, engine="BP4")
+
+    # Inspect checkpoint on rank 0 with `bpls`
+    if mesh.comm.rank == 0:
+        output = subprocess.run(["bpls", "-a", "-l", str(filename.absolute())], capture_output=True)
+        print(output.stdout.decode("utf-8"))
+
+
+mesh_file = Path("mesh.bp")
+
+# + tags=["hide-output"]
+with ipp.Cluster(engines="mpi", n=2) as cluster:
+    # Write mesh to file
+    query = cluster[:].apply_async(write_mesh, mesh_file)
+    query.wait()
+    assert query.successful(), query.error
+    print("".join(query.stdout))
+
+# -
+# We observe that we have stored all the data needed to re-create the mesh in the file `mesh.bp`.
+# We can therefore read it (to any number of processes) with `adios4dolfinx.read_mesh` + + +def read_mesh(filename: Path): + from mpi4py import MPI + + import dolfinx + + import adios4dolfinx + + mesh = adios4dolfinx.read_mesh( + filename, comm=MPI.COMM_WORLD, engine="BP4", ghost_mode=dolfinx.mesh.GhostMode.none + ) + print_mesh_info(mesh) + + +# ## Reading mesh checkpoints (N-to-M) +# We can now read the checkpoint on a different number of processes than we wrote it on. + +# + tags=["hide-output"] +with ipp.Cluster(engines="mpi", n=4) as cluster: + # Write mesh to file + cluster[:].push({"print_mesh_info": print_mesh_info}) + query = cluster[:].apply_async(read_mesh, mesh_file) + query.wait() + assert query.successful(), query.error + print("".join(query.stdout)) +# - diff --git a/pyproject.toml b/pyproject.toml index 231a703..4200b65 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,8 +13,8 @@ dependencies = ["fenics-dolfinx>=0.8.0.dev0"] [project.optional-dependencies] test = ["pytest", "coverage", "ipyparallel"] dev = ["pdbpp", "ipython", "mypy", "ruff"] -docs = ["jupyter-book"] -all = ["adios4dolfinx[test]", "adios4dolfinx[dev]", "adios4dolfinx[docs]"] +docs = ["jupyter-book", "ipyparallel", "ipywidgets"] +all = ["adios4dolfinx[test,dev,docs]"] [tool.pytest.ini_options] addopts = ["--import-mode=importlib"] @@ -28,7 +28,7 @@ exclude = ["docs/", "build/"] files = ["src", "tests"] [tool.ruff] -src = ["src", "tests"] +src = ["src", "tests", "docs"] line-length = 100 indent-width = 4 @@ -40,7 +40,7 @@ select = [ "E", "W", # isort - "I001" + "I001", ] @@ -56,13 +56,13 @@ known-third-party = [ "pytest", ] section-order = [ - "future", - "standard-library", - "mpi", - "third-party", - "first-party", - "local-folder", + "future", + "standard-library", + "mpi", + "third-party", + "first-party", + "local-folder", ] [tool.ruff.lint.isort.sections] -"mpi" = ["mpi4py", "petsc4py"] \ No newline at end of file +"mpi" = ["mpi4py", "petsc4py"] From ac55b8a5f3509217cce64c0b88aab05577d82604 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Tue, 5 Mar 2024 13:30:22 +0100 Subject: [PATCH 25/49] Add example of reading and writing of meshtags. (#85) * Add example of reading and writing of meshtags. * Minor tweaks * Update toc --- _toc.yml | 2 +- docs/meshtags.py | 85 +++++++++++++++++++++++++++++++++++++ docs/time_dependent_mesh.py | 1 + 3 files changed, 87 insertions(+), 1 deletion(-) create mode 100644 docs/meshtags.py diff --git a/_toc.yml b/_toc.yml index a08049f..28809d4 100644 --- a/_toc.yml +++ b/_toc.yml @@ -10,7 +10,7 @@ parts: - file: "docs/writing_mesh_checkpoint" - file: "docs/partitioned_mesh" - file: "docs/time_dependent_mesh" - # - file: "docs/meshtags" + - file: "docs/meshtags" # - caption: Writing and reading functions # chapters: diff --git a/docs/meshtags.py b/docs/meshtags.py new file mode 100644 index 0000000..9d243df --- /dev/null +++ b/docs/meshtags.py @@ -0,0 +1,85 @@ +# # Writing MeshTags data to a checkpoint file +# In many scenarios, the mesh used in a checkpoint is not trivial, and subdomains and sub-entities +# have been tagged with appropriate markers. +# As the mesh gets redistributed when read +# (see [Writing Mesh Checkpoint](./writing_mesh_checkpoint)), +# we need to store any tags together with this new mesh. + +# As an example we will use a unit-cube, where each entity has been tagged with a unique index. 
+
+from pathlib import Path
+
+from mpi4py import MPI
+
+import dolfinx
+import ipyparallel as ipp
+import numpy as np
+
+import adios4dolfinx
+
+assert MPI.COMM_WORLD.size == 1, "This example should only be run with 1 MPI process"
+
+mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, nx=3, ny=4, nz=5)
+
+# We start by computing the unique global index of each (owned) entity in the mesh
+# as well as its corresponding midpoint
+
+entity_midpoints = {}
+meshtags = {}
+for i in range(mesh.topology.dim + 1):
+    mesh.topology.create_entities(i)
+    e_map = mesh.topology.index_map(i)
+
+    # Compute midpoints of entities
+    entities = np.arange(e_map.size_local, dtype=np.int32)
+    entity_midpoints[i] = dolfinx.mesh.compute_midpoints(mesh, i, entities)
+    # Associate each local index with its global index
+    values = np.arange(e_map.size_local, dtype=np.int32) + e_map.local_range[0]
+    meshtags[i] = dolfinx.mesh.meshtags(mesh, i, entities, values)
+
+# We use adios4dolfinx to write the mesh and meshtags to file.
+# We associate each meshtag with a name
+
+filename = Path("mesh_with_meshtags.bp")
+adios4dolfinx.write_mesh(filename, mesh)
+for i, tag in meshtags.items():
+    adios4dolfinx.write_meshtags(filename, mesh, tag, meshtag_name=f"meshtags_{i}")
+
+
+# Next we want to read the meshtags in on a different number of processes,
+# and check that the midpoints of each entity are still correct
+
+
+def verify_meshtags(filename: Path):
+    # We assume that entity_midpoints has been sent to the engine
+    from mpi4py import MPI
+
+    import dolfinx
+    import numpy as np
+
+    import adios4dolfinx
+
+    read_mesh = adios4dolfinx.read_mesh(filename, MPI.COMM_WORLD)
+    prefix = f"{read_mesh.comm.rank + 1}/{read_mesh.comm.size}: "
+    for i in range(read_mesh.topology.dim + 1):
+        # Read mesh from file
+        meshtags = adios4dolfinx.read_meshtags(filename, read_mesh, meshtag_name=f"meshtags_{i}")
+
+        # Compute midpoints for all local entities on process
+        midpoints = dolfinx.mesh.compute_midpoints(read_mesh, i, meshtags.indices)
+        # Compare locally computed midpoint with reference data
+        for global_pos, midpoint in zip(meshtags.values, midpoints):
+            np.testing.assert_allclose(
+                entity_midpoints[i][global_pos],
+                midpoint,
+                err_msg=f"{prefix}: Midpoint ({i, global_pos}) does not match",
+            )
+        print(f"{prefix} Matching of all entities of dimension {i} successful")
+
+
+with ipp.Cluster(engines="mpi", n=3) as cluster:
+    cluster[:].push({"entity_midpoints": entity_midpoints})
+    query = cluster[:].apply_async(verify_meshtags, filename)
+    query.wait()
+    assert query.successful(), query.error
+    print("".join(query.stdout))
diff --git a/docs/time_dependent_mesh.py b/docs/time_dependent_mesh.py
index e6e22bd..2d6f6a5 100644
--- a/docs/time_dependent_mesh.py
+++ b/docs/time_dependent_mesh.py
@@ -7,6 +7,7 @@
 # checkpoint to disk.
# First, we create a simple function to compute the volume of a mesh
+
 from pathlib import Path
 
 from mpi4py import MPI

From 9add0c0220a20fb5829550a484ca175fddba2a05 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Tue, 5 Mar 2024 13:56:52 +0100
Subject: [PATCH 26/49] Add demo writing and reading function checkpoint (#86)

* Add demo writing and reading function checkpoint

* Fix reference
---
 _toc.yml                             |  6 +--
 docs/writing_functions_checkpoint.py | 77 ++++++++++++++++++++++++++++
 2 files changed, 80 insertions(+), 3 deletions(-)
 create mode 100644 docs/writing_functions_checkpoint.py

diff --git a/_toc.yml b/_toc.yml
index 28809d4..f8a33f4 100644
--- a/_toc.yml
+++ b/_toc.yml
@@ -12,9 +12,9 @@ parts:
       - file: "docs/time_dependent_mesh"
       - file: "docs/meshtags"
 
-  # - caption: Writing and reading functions
-  #   chapters:
-  #     - file: "docs/writing_functions_checkpoint"
+  - caption: Writing and reading functions
+    chapters:
+      - file: "docs/writing_functions_checkpoint"
   #     - file: "docs/write_on_original_mesh"
 
   - caption: Python API
diff --git a/docs/writing_functions_checkpoint.py b/docs/writing_functions_checkpoint.py
new file mode 100644
index 0000000..f863900
--- /dev/null
+++ b/docs/writing_functions_checkpoint.py
@@ -0,0 +1,77 @@
+# # Writing a function checkpoint
+# In the previous sections, we have gone into quite some detail as to how
+# to store meshes with adios4dolfinx.
+# This section will explain how to store functions, and how to read them back in.
+
+# We start by creating a mesh and an appropriate function
+
+from pathlib import Path
+
+from mpi4py import MPI
+
+import dolfinx
+import ipyparallel as ipp
+
+import adios4dolfinx
+
+assert MPI.COMM_WORLD.size == 1, "This example should only be run with 1 MPI process"
+
+mesh = dolfinx.mesh.create_unit_square(
+    MPI.COMM_WORLD, nx=10, ny=10, cell_type=dolfinx.cpp.mesh.CellType.quadrilateral
+)
+
+# Next, we create a function, and interpolate a polynomial function into the function space
+el = "N1curl"
+degree = 3
+V = dolfinx.fem.functionspace(mesh, (el, degree))
+
+
+def f(x):
+    return -(x[1] ** 2), x[0] - 2 * x[1]
+
+
+u = dolfinx.fem.Function(V)
+u.interpolate(f)
+
+# Next we start by storing the mesh
+
+filename = Path("function_checkpoint.bp")
+adios4dolfinx.write_mesh(filename, mesh)
+
+# Next, we store the function to file, and associate it with a name.
+# Note that we can also associate a time stamp with it, as done for meshes in
+# [Writing time-dependent mesh checkpoint](./time_dependent_mesh)
+
+adios4dolfinx.write_function(filename, u, time=0.3, name="my_curl_function")
+
+# Next, we want to read the function back in (using multiple MPI processes)
+# and check that the function is correct.
+
+
+def read_function(filename: Path, timestamp: float):
+    from mpi4py import MPI
+
+    import dolfinx
+    import numpy as np
+
+    import adios4dolfinx
+
+    in_mesh = adios4dolfinx.read_mesh(filename, MPI.COMM_WORLD)
+    W = dolfinx.fem.functionspace(in_mesh, (el, degree))
+    u_ref = dolfinx.fem.Function(W)
+    u_ref.interpolate(f)
+    u_in = dolfinx.fem.Function(W)
+    adios4dolfinx.read_function(filename, u_in, time=timestamp, name="my_curl_function")
+    np.testing.assert_allclose(u_ref.x.array, u_in.x.array, atol=1e-14)
+    print(
+        f"{MPI.COMM_WORLD.rank + 1}/{MPI.COMM_WORLD.size}: ",
+        f"Function read in correctly at time {timestamp}",
+    )
+
+
+with ipp.Cluster(engines="mpi", n=3) as cluster:
+    cluster[:].push({"f": f, "el": el, "degree": degree})
+    query = cluster[:].apply_async(read_function, filename, 0.3)
+    query.wait()
+    assert query.successful(), query.error
+    print("".join(query.stdout))

From 7a13c5d5369af2fdb241481aca8e8597e49c9d21 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Tue, 5 Mar 2024 15:01:37 +0100
Subject: [PATCH 27/49] Update docs to hide log info (#87)

* Update docs to hide log info

* Ruff formatting
---
 _toc.yml                             |  1 +
 docs/ipyparallel_intro.py            |  4 +-
 docs/meshtags.py                     |  3 +-
 docs/partitioned_mesh.py             |  9 ++--
 docs/snapshot_checkpoint.py          | 70 ++++++++++++++++++++++++++++
 docs/time_dependent_mesh.py          |  3 +-
 docs/writing_functions_checkpoint.py |  3 +-
 docs/writing_mesh_checkpoint.py      |  7 +--
 8 files changed, 89 insertions(+), 11 deletions(-)
 create mode 100644 docs/snapshot_checkpoint.py

diff --git a/_toc.yml b/_toc.yml
index f8a33f4..5ef4510 100644
--- a/_toc.yml
+++ b/_toc.yml
@@ -15,6 +15,7 @@ parts:
   - caption: Writing and reading functions
     chapters:
       - file: "docs/writing_functions_checkpoint"
+      - file: "docs/snapshot_checkpoint"
   #     - file: "docs/write_on_original_mesh"
 
   - caption: Python API
diff --git a/docs/ipyparallel_intro.py b/docs/ipyparallel_intro.py
index c0f5b4c..69c5dc3 100644
--- a/docs/ipyparallel_intro.py
+++ b/docs/ipyparallel_intro.py
@@ -4,6 +4,8 @@
 # We illustrate what happens in parallel by launching three MPI processes
 # using [ipyparallel](https://ipyparallel.readthedocs.io/en/latest/)
 
+import logging
+
 import ipyparallel as ipp
 
 
@@ -14,7 +16,7 @@ def hello_mpi():
     print(f"Hello from rank {MPI.COMM_WORLD.rank}/{MPI.COMM_WORLD.size - 1}")
 
 
-with ipp.Cluster(engines="mpi", n=3) as cluster:
+with ipp.Cluster(engines="mpi", n=3, log_level=logging.ERROR) as cluster:
     # We send the query to run the function `hello_mpi` on all engines
     query = cluster[:].apply_async(hello_mpi)
     # We wait for all engines to finish
diff --git a/docs/meshtags.py b/docs/meshtags.py
index 9d243df..f4e719e 100644
--- a/docs/meshtags.py
+++ b/docs/meshtags.py
@@ -7,6 +7,7 @@
 # As an example we will use a unit-cube, where each entity has been tagged with a unique index.
+import logging
 from pathlib import Path
 
 from mpi4py import MPI
 
 import dolfinx
 import ipyparallel as ipp
 import numpy as np
 
 import adios4dolfinx
 
@@ -77,7 +78,7 @@ def verify_meshtags(filename: Path):
     print(f"{prefix} Matching of all entities of dimension {i} successful")
 
 
-with ipp.Cluster(engines="mpi", n=3) as cluster:
+with ipp.Cluster(engines="mpi", n=3, log_level=logging.ERROR) as cluster:
     cluster[:].push({"entity_midpoints": entity_midpoints})
     query = cluster[:].apply_async(verify_meshtags, filename)
     query.wait()
diff --git a/docs/partitioned_mesh.py b/docs/partitioned_mesh.py
index 8ab2751..048b9a9 100644
--- a/docs/partitioned_mesh.py
+++ b/docs/partitioned_mesh.py
@@ -6,6 +6,7 @@
 # (on a fixed number of processes), one can store the partitioning of the mesh
 # in the checkpoint.
 
+import logging
 from pathlib import Path
 
 import ipyparallel as ipp
@@ -43,7 +44,7 @@ def write_partitioned_mesh(filename: Path):
 mesh_file = Path("partitioned_mesh.bp")
 n = 3
 
 # + tags=["hide-output"]
-with ipp.Cluster(engines="mpi", n=n) as cluster:
+with ipp.Cluster(engines="mpi", n=n, log_level=logging.ERROR) as cluster:
     query = cluster[:].apply_async(write_partitioned_mesh, mesh_file)
     query.wait()
     assert query.successful(), query.error
@@ -70,7 +71,7 @@ def read_partitioned_mesh(filename: Path, read_from_partition: bool = True):
     print(f"{prefix} Caught exception: ", e)
 
 
-with ipp.Cluster(engines="mpi", n=n + 1) as cluster:
+with ipp.Cluster(engines="mpi", n=n + 1, log_level=logging.ERROR) as cluster:
     # Read mesh from file with different number of processes
     query = cluster[:].apply_async(read_partitioned_mesh, mesh_file)
     query.wait()
@@ -80,7 +81,7 @@ def read_partitioned_mesh(filename: Path, read_from_partition: bool = True):
 # Read mesh from file with different number of processes (not using partitioning information).
 
 # + tags=["hide-output"]
-with ipp.Cluster(engines="mpi", n=n + 1) as cluster:
+with ipp.Cluster(engines="mpi", n=n + 1, log_level=logging.ERROR) as cluster:
     query = cluster[:].apply_async(read_partitioned_mesh, mesh_file, False)
     query.wait()
     assert query.successful()
@@ -91,7 +92,7 @@ def read_partitioned_mesh(filename: Path, read_from_partition: bool = True):
 
 # + tags=["hide-output"]
-with ipp.Cluster(engines="mpi", n=n) as cluster:
+with ipp.Cluster(engines="mpi", n=n, log_level=logging.ERROR) as cluster:
     query = cluster[:].apply_async(read_partitioned_mesh, mesh_file, True)
     query.wait()
     assert query.successful()
     print("".join(query.stdout))
diff --git a/docs/snapshot_checkpoint.py b/docs/snapshot_checkpoint.py
new file mode 100644
index 0000000..15b9a1f
--- /dev/null
+++ b/docs/snapshot_checkpoint.py
@@ -0,0 +1,70 @@
+# # Snapshot checkpoint (non-persistent)
+# The checkpoint method described in [Writing function checkpoints](./writing_functions_checkpoint)
+# is *N-to-M*, meaning that you can write a checkpoint out on N processes and read it back in on M processes.
+#
+# As discussed in that chapter, these checkpoints need to be associated with a mesh.
+# This is because the function is defined on a specific function space, which in turn is
+# defined on a specific mesh.
+#
+# However, there are certain scenarios where you simply want to store a checkpoint associated
+# with the current mesh, one that is only meant to be used during this simulation.
+# An example use-case is an iterative solver, where one wants a fall-back mechanism that
+# does not require keeping an extra copy of the solution in RAM.
+
+# In this example, we will demonstrate how to write a snapshot checkpoint to disk.
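+# As a rough sketch of the fall-back use-case mentioned above, consider the function below.
+# Note that `do_iteration` and `has_diverged` are hypothetical placeholders for the solver
+# specifics (they are not part of adios4dolfinx); only the `snapshot_checkpoint` calls are real API.
+
+
+def iterate_with_fallback(u, filename, do_iteration, has_diverged, max_iterations=50):
+    import adios4dolfinx
+
+    # Store the last known good state to disk instead of keeping a copy in RAM
+    adios4dolfinx.snapshot_checkpoint(u, filename, adios4dolfinx.adios2_helpers.adios2.Mode.Write)
+    for _ in range(max_iterations):
+        do_iteration(u)
+        if has_diverged(u):
+            # Roll back to the stored state and let the caller adjust the solver settings
+            adios4dolfinx.snapshot_checkpoint(
+                u, filename, adios4dolfinx.adios2_helpers.adios2.Mode.Read
+            )
+            return False
+        # Update the fall-back state after each successful iteration
+        adios4dolfinx.snapshot_checkpoint(
+            u, filename, adios4dolfinx.adios2_helpers.adios2.Mode.Write
+        )
+    return True
+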
+
+# First we define a function `f` that we want to represent in the function space
+
+import logging
+from pathlib import Path
+
+import ipyparallel as ipp
+
+
+def f(x):
+    import numpy as np
+
+    return np.sin(x[0]) + 0.1 * x[1]
+
+
+# Next, we create a mesh and an appropriate function space, and write and read a snapshot to and from file
+def read_write_snapshot(filename: Path):
+    from mpi4py import MPI
+
+    import dolfinx
+    import numpy as np
+
+    import adios4dolfinx
+
+    mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, 3, 7, 4)
+    V = dolfinx.fem.functionspace(mesh, ("Lagrange", 5))
+    u = dolfinx.fem.Function(V)
+    u.interpolate(f)
+    u.name = "Current_solution"
+    # Next, we store the solution to file
+    adios4dolfinx.snapshot_checkpoint(u, filename, adios4dolfinx.adios2_helpers.adios2.Mode.Write)
+
+    # Next, we create a new function and load the solution into it
+    u_new = dolfinx.fem.Function(V)
+    u_new.name = "Read_solution"
+    adios4dolfinx.snapshot_checkpoint(
+        u_new, filename, adios4dolfinx.adios2_helpers.adios2.Mode.Read
+    )
+
+    # Next, we verify that the solution is correct
+    np.testing.assert_allclose(u_new.x.array, u.x.array)
+
+    print(f"{MPI.COMM_WORLD.rank + 1}/{MPI.COMM_WORLD.size}: Successfully wrote and read snapshot")
+
+
+mesh_file = Path("snapshot.bp")
+
+with ipp.Cluster(engines="mpi", n=3, log_level=logging.ERROR) as cluster:
+    cluster[:].push({"f": f})
+    query = cluster[:].apply_async(
+        read_write_snapshot,
+        mesh_file,
+    )
+    query.wait()
+    assert query.successful(), query.stderr
+    print("".join(query.stdout))
diff --git a/docs/time_dependent_mesh.py b/docs/time_dependent_mesh.py
index 2d6f6a5..21d149d 100644
--- a/docs/time_dependent_mesh.py
+++ b/docs/time_dependent_mesh.py
@@ -8,6 +8,7 @@
 
 # First, we create a simple function to compute the volume of a mesh
 
+import logging
 from pathlib import Path
 
 from mpi4py import MPI
@@ -67,7 +68,7 @@ def write_meshes(filename: Path):
 mesh_file = Path("timedep_mesh.bp")
 n = 3
 
-with ipp.Cluster(engines="mpi", n=n) as cluster:
+with ipp.Cluster(engines="mpi", n=n, log_level=logging.ERROR) as cluster:
     # Write mesh to file
     cluster[:].push({"compute_volume": compute_volume})
     query = cluster[:].apply_async(write_meshes, mesh_file)
diff --git a/docs/writing_functions_checkpoint.py b/docs/writing_functions_checkpoint.py
index f863900..c8ac912 100644
--- a/docs/writing_functions_checkpoint.py
+++ b/docs/writing_functions_checkpoint.py
@@ -5,6 +5,7 @@
 # We start by creating a mesh and an appropriate function
 
+import logging
 from pathlib import Path
 
 from mpi4py import MPI
@@ -69,7 +70,7 @@ def read_function(filename: Path, timestamp: float):
     )
 
 
-with ipp.Cluster(engines="mpi", n=3) as cluster:
+with ipp.Cluster(engines="mpi", n=3, log_level=logging.ERROR) as cluster:
     cluster[:].push({"f": f, "el": el, "degree": degree})
     query = cluster[:].apply_async(read_function, filename, 0.3)
     query.wait()
diff --git a/docs/writing_mesh_checkpoint.py b/docs/writing_mesh_checkpoint.py
index c33da07..1805eee 100644
--- a/docs/writing_mesh_checkpoint.py
+++ b/docs/writing_mesh_checkpoint.py
@@ -4,6 +4,7 @@
 #
 # We start by creating a simple unit-square mesh.
 
+import logging
 from pathlib import Path
 
 from mpi4py import MPI
# + tags=["hide-output"] -with ipp.Cluster(engines="mpi", n=3) as cluster: +with ipp.Cluster(engines="mpi", n=3, log_level=logging.ERROR) as cluster: # Push print_mesh_info to all engines cluster[:].push({"print_mesh_info": print_mesh_info}) @@ -109,7 +110,7 @@ def write_mesh(filename: Path): mesh_file = Path("mesh.bp") # + tags=["hide-output"] -with ipp.Cluster(engines="mpi", n=2) as cluster: +with ipp.Cluster(engines="mpi", n=2, log_level=logging.ERROR) as cluster: # Write mesh to file query = cluster[:].apply_async(write_mesh, mesh_file) query.wait() @@ -138,7 +139,7 @@ def read_mesh(filename: Path): # We can now read the checkpoint on a different number of processes than we wrote it on. # + tags=["hide-output"] -with ipp.Cluster(engines="mpi", n=4) as cluster: +with ipp.Cluster(engines="mpi", n=4, log_level=logging.ERROR) as cluster: # Write mesh to file cluster[:].push({"print_mesh_info": print_mesh_info}) query = cluster[:].apply_async(read_mesh, mesh_file) From 458c6df1f5426cb123af0ae666aec0bfc9ea3cb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Tue, 5 Mar 2024 15:20:08 +0100 Subject: [PATCH 28/49] Add more removal (#88) --- docs/partitioned_mesh.py | 5 ----- docs/writing_functions_checkpoint.py | 3 ++- docs/writing_mesh_checkpoint.py | 6 ------ 3 files changed, 2 insertions(+), 12 deletions(-) diff --git a/docs/partitioned_mesh.py b/docs/partitioned_mesh.py index 048b9a9..5dc3699 100644 --- a/docs/partitioned_mesh.py +++ b/docs/partitioned_mesh.py @@ -43,14 +43,12 @@ def write_partitioned_mesh(filename: Path): mesh_file = Path("partitioned_mesh.bp") n = 3 -# + tags=["hide-output"] with ipp.Cluster(engines="mpi", n=n, log_level=logging.ERROR) as cluster: query = cluster[:].apply_async(write_partitioned_mesh, mesh_file) query.wait() assert query.successful(), query.error print("".join(query.stdout)) -# - # # Reading a partitioned mesh # If we try to read the mesh in on a different number of processes, we will get an error @@ -80,18 +78,15 @@ def read_partitioned_mesh(filename: Path, read_from_partition: bool = True): # Read mesh from file with different number of processes (not using partitioning information). -# + tags=["hide-output"] with ipp.Cluster(engines="mpi", n=n + 1, log_level=logging.ERROR) as cluster: query = cluster[:].apply_async(read_partitioned_mesh, mesh_file, False) query.wait() assert query.successful() print("".join(query.stdout)) -# - # Read mesh from file with same number of processes as was written, # re-using partitioning information. 
-# + tags=["hide-output"] with ipp.Cluster(engines="mpi", n=n, log_level=logging.ERROR) as cluster: query = cluster[:].apply_async(read_partitioned_mesh, mesh_file, True) query.wait() diff --git a/docs/writing_functions_checkpoint.py b/docs/writing_functions_checkpoint.py index c8ac912..bd617c1 100644 --- a/docs/writing_functions_checkpoint.py +++ b/docs/writing_functions_checkpoint.py @@ -22,6 +22,7 @@ ) # Next, we create a function, and interpolate a polynomial function into the function space + el = "N1curl" degree = 3 V = dolfinx.fem.functionspace(mesh, (el, degree)) @@ -34,7 +35,7 @@ def f(x): u = dolfinx.fem.Function(V) u.interpolate(f) -# Next we start by storing the mesh +# For the checkpointing, we start by storing the mesh to file filename = Path("function_checkpoint.bp") adios4dolfinx.write_mesh(filename, mesh) diff --git a/docs/writing_mesh_checkpoint.py b/docs/writing_mesh_checkpoint.py index 1805eee..d4d8757 100644 --- a/docs/writing_mesh_checkpoint.py +++ b/docs/writing_mesh_checkpoint.py @@ -57,7 +57,6 @@ def create_distributed_mesh(ghosted: bool, N: int = 10): # Next we start up a new cluster with three engines. # As we defined `print_mesh_info` locally on this process, we need to push it to all engines. -# + tags=["hide-output"] with ipp.Cluster(engines="mpi", n=3, log_level=logging.ERROR) as cluster: # Push print_mesh_info to all engines cluster[:].push({"print_mesh_info": print_mesh_info}) @@ -73,7 +72,6 @@ def create_distributed_mesh(ghosted: bool, N: int = 10): assert query_false.successful(), query_false.error print("".join(query_false.stdout)) -# - # ## Writing a mesh checkpoint # The input data to a mesh is: # - A geometry: the set of points in R^D that are part of each cell @@ -109,7 +107,6 @@ def write_mesh(filename: Path): mesh_file = Path("mesh.bp") -# + tags=["hide-output"] with ipp.Cluster(engines="mpi", n=2, log_level=logging.ERROR) as cluster: # Write mesh to file query = cluster[:].apply_async(write_mesh, mesh_file) @@ -117,7 +114,6 @@ def write_mesh(filename: Path): assert query.successful(), query.error print("".join(query.stdout)) -# - # We observe that we have stored all the data needed to re-create the mesh in the file `mesh.bp`. # We can therefore read it (to any number of processes) with `adios4dolfinx.read_mesh` @@ -138,7 +134,6 @@ def read_mesh(filename: Path): # ## Reading mesh checkpoints (N-to-M) # We can now read the checkpoint on a different number of processes than we wrote it on. 
-# + tags=["hide-output"]
 with ipp.Cluster(engines="mpi", n=4, log_level=logging.ERROR) as cluster:
     # Write mesh to file
     cluster[:].push({"print_mesh_info": print_mesh_info})
     query = cluster[:].apply_async(read_mesh, mesh_file)
     query.wait()
     assert query.successful(), query.error
     print("".join(query.stdout))
-# -

From d2cf31630ab5b4f4f5e1833b16663991f79b4ace Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Tue, 5 Mar 2024 17:05:35 +0100
Subject: [PATCH 29/49] Add last documentation (#89)

* Add last documentation

* Minor updates

* Ruff formatting
---
 _toc.yml                                 |   2 +-
 docs/original_checkpoint.py              | 137 +++++++++++++++++++++++
 docs/partitioned_mesh.py                 |   6 +-
 docs/snapshot_checkpoint.py              |   2 +-
 src/adios4dolfinx/original_checkpoint.py |  12 +-
 5 files changed, 150 insertions(+), 9 deletions(-)
 create mode 100644 docs/original_checkpoint.py

diff --git a/_toc.yml b/_toc.yml
index 5ef4510..762a097 100644
--- a/_toc.yml
+++ b/_toc.yml
@@ -16,7 +16,7 @@ parts:
     chapters:
       - file: "docs/writing_functions_checkpoint"
       - file: "docs/snapshot_checkpoint"
-  #     - file: "docs/write_on_original_mesh"
+      - file: "docs/original_checkpoint"
 
   - caption: Python API
     chapters:
diff --git a/docs/original_checkpoint.py b/docs/original_checkpoint.py
new file mode 100644
index 0000000..0f25e78
--- /dev/null
+++ b/docs/original_checkpoint.py
@@ -0,0 +1,137 @@
+# # Checkpoint on input mesh
+# As we have discussed earlier, one can choose to store function data in a way that
+# is N-to-M compatible by using `adios4dolfinx.write_function`.
+# This stores the distributed mesh in its current (partitioned) ordering, and does
+# not use the original input data ordering for the cells and connectivity.
+# This means that you cannot use your original mesh (from `.xdmf` files) or mesh tags
+# together with the checkpoint. The checkpoint has to store the mesh and associated
+# mesh-tags.

+# An alternative way of storing an N-to-M checkpoint is to store the function data in the same
+# ordering as the input mesh. The write operation will be more expensive, as it requires data
+# communication to ensure that contiguous chunks of data are written to the checkpoint.
+# The method is exposed as `adios4dolfinx.write_function_on_input_mesh`.
+# Below we will demonstrate this method.
+
+import logging
+from pathlib import Path
+from typing import Tuple
+
+import ipyparallel as ipp
+
+
+def locate_facets(x, tol=1.0e-12):
+    return abs(x[0]) < tol
+
+
+def create_xdmf_mesh(filename: Path):
+    from mpi4py import MPI
+
+    import dolfinx
+
+    mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 10, 10)
+    facets = dolfinx.mesh.locate_entities_boundary(mesh, mesh.topology.dim - 1, locate_facets)
+    facet_tag = dolfinx.mesh.meshtags(mesh, mesh.topology.dim - 1, facets, 1)
+    facet_tag.name = "FacetTag"
+    with dolfinx.io.XDMFFile(MPI.COMM_WORLD, filename.with_suffix(".xdmf"), "w") as xdmf:
+        xdmf.write_mesh(mesh)
+        xdmf.write_meshtags(facet_tag, mesh.geometry)
+    print(f"{mesh.comm.rank+1}/{mesh.comm.size} Mesh written to {filename.with_suffix('.xdmf')}")
+
+
+mesh_file = Path("MyMesh.xdmf")
+with ipp.Cluster(engines="mpi", n=4, log_level=logging.ERROR) as cluster:
+    # Create a mesh and write to XDMFFile
+    cluster[:].push({"locate_facets": locate_facets})
+    query = cluster[:].apply_async(create_xdmf_mesh, mesh_file)
+    query.wait()
+    assert query.successful(), query.error
+    print("".join(query.stdout))
+
+
+# Next, we will create a function on the mesh and write it to a checkpoint.
+ + +def f(x): + return (x[0] + x[1]) * (x[0] < 0.5), x[1], x[2] - x[1] + + +def write_function( + mesh_filename: Path, function_filename: Path, element: Tuple[str, int, Tuple[int,]] +): + from mpi4py import MPI + + import dolfinx + + import adios4dolfinx + + with dolfinx.io.XDMFFile(MPI.COMM_WORLD, mesh_filename, "r") as xdmf: + mesh = xdmf.read_mesh() + V = dolfinx.fem.functionspace(mesh, element) + u = dolfinx.fem.Function(V) + u.interpolate(f) + + adios4dolfinx.write_function_on_input_mesh( + function_filename.with_suffix(".bp"), + u, + mode=adios4dolfinx.adios2_helpers.adios2.Mode.Write, + time=0.0, + name="Output", + ) + print( + f"{mesh.comm.rank+1}/{mesh.comm.size} Function written to ", + f"{function_filename.with_suffix('.bp')}", + ) + + +# Read in mesh and write function to file + +element = ("DG", 4, (3,)) +function_file = Path("MyFunction.bp") +with ipp.Cluster(engines="mpi", n=2, log_level=logging.ERROR) as cluster: + cluster[:].push({"f": f}) + query = cluster[:].apply_async(write_function, mesh_file, function_file, element) + query.wait() + assert query.successful(), query.error + print("".join(query.stdout)) + + +# Finally, we will read in the mesh from file and the function from the checkpoint +# and compare it with the analytical solution. + + +def verify_checkpoint( + mesh_filename: Path, function_filename: Path, element: Tuple[str, int, Tuple[int,]] +): + from mpi4py import MPI + + import dolfinx + import numpy as np + + import adios4dolfinx + + with dolfinx.io.XDMFFile(MPI.COMM_WORLD, mesh_filename, "r") as xdmf: + in_mesh = xdmf.read_mesh() + V = dolfinx.fem.functionspace(in_mesh, element) + u_in = dolfinx.fem.Function(V) + adios4dolfinx.read_function(function_filename.with_suffix(".bp"), u_in, time=0.0, name="Output") + + # Compute exact interpolation + u_ex = dolfinx.fem.Function(V) + u_ex.interpolate(f) + + np.testing.assert_allclose(u_in.x.array, u_ex.x.array) + print( + "Successfully read checkpoint onto mesh on rank ", + f"{in_mesh.comm.rank + 1}/{in_mesh.comm.size}", + ) + + +# Verify checkpoint by comparing to exact solution + +with ipp.Cluster(engines="mpi", n=5, log_level=logging.ERROR) as cluster: + cluster[:].push({"f": f}) + query = cluster[:].apply_async(verify_checkpoint, mesh_file, function_file, element) + query.wait() + assert query.successful(), query.error + print("".join(query.stdout)) diff --git a/docs/partitioned_mesh.py b/docs/partitioned_mesh.py index 5dc3699..8bbb22a 100644 --- a/docs/partitioned_mesh.py +++ b/docs/partitioned_mesh.py @@ -73,7 +73,7 @@ def read_partitioned_mesh(filename: Path, read_from_partition: bool = True): # Read mesh from file with different number of processes query = cluster[:].apply_async(read_partitioned_mesh, mesh_file) query.wait() - assert query.successful() + assert query.successful(), query.error print("".join(query.stdout)) # Read mesh from file with different number of processes (not using partitioning information). 
@@ -81,7 +81,7 @@ def read_partitioned_mesh(filename: Path, read_from_partition: bool = True): with ipp.Cluster(engines="mpi", n=n + 1, log_level=logging.ERROR) as cluster: query = cluster[:].apply_async(read_partitioned_mesh, mesh_file, False) query.wait() - assert query.successful() + assert query.successful(), query.error print("".join(query.stdout)) # Read mesh from file with same number of processes as was written, @@ -90,5 +90,5 @@ def read_partitioned_mesh(filename: Path, read_from_partition: bool = True): with ipp.Cluster(engines="mpi", n=n, log_level=logging.ERROR) as cluster: query = cluster[:].apply_async(read_partitioned_mesh, mesh_file, True) query.wait() - assert query.successful() + assert query.successful(), query.error print("".join(query.stdout)) diff --git a/docs/snapshot_checkpoint.py b/docs/snapshot_checkpoint.py index 15b9a1f..4169e9b 100644 --- a/docs/snapshot_checkpoint.py +++ b/docs/snapshot_checkpoint.py @@ -66,5 +66,5 @@ def read_write_snapshot(filename: Path): mesh_file, ) query.wait() - assert query.successful(), query.stderr + assert query.successful(), query.error print("".join(query.stdout)) diff --git a/src/adios4dolfinx/original_checkpoint.py b/src/adios4dolfinx/original_checkpoint.py index b373bcc..c993bd2 100644 --- a/src/adios4dolfinx/original_checkpoint.py +++ b/src/adios4dolfinx/original_checkpoint.py @@ -198,7 +198,9 @@ def create_original_mesh_data(mesh: dolfinx.mesh.Mesh) -> MeshData: ) -def create_function_data_on_original_mesh(u: dolfinx.fem.Function) -> FunctionData: +def create_function_data_on_original_mesh( + u: dolfinx.fem.Function, name: str | None = None +) -> FunctionData: """ Create data object to save with ADIOS2 """ @@ -308,6 +310,7 @@ def create_function_data_on_original_mesh(u: dolfinx.fem.Function) -> FunctionDa num_dofs_local = dofmap.index_map.size_local * dofmap.index_map_bs num_dofs_global = dofmap.index_map.size_global * dofmap.index_map_bs local_range = np.asarray(dofmap.index_map.local_range, dtype=np.int64) * dofmap.index_map_bs + func_name = name if name is not None else u.name return FunctionData( cell_permutations=cell_permutation_info, local_cell_range=local_cell_range, @@ -319,7 +322,7 @@ def create_function_data_on_original_mesh(u: dolfinx.fem.Function) -> FunctionDa num_dofs_global=num_dofs_global, dofmap_range=dofmap_imap.local_range, global_dofs_in_dofmap=dofmap_imap.size_global, - name=u.name, + name=func_name, ) @@ -329,6 +332,7 @@ def write_function_on_input_mesh( engine: str = "BP4", mode: adios2.Mode = adios2.Mode.Append, time: float = 0.0, + name: str | None = None, ): """ Write function checkpoint (to be read with the input mesh). @@ -339,10 +343,10 @@ def write_function_on_input_mesh( engine: The ADIOS2 engine to use mode: The ADIOS2 mode to use (write or append) time: Time-stamp associated with function at current write step - + name: Name of function. If None, the name of the function is used. 
""" mesh = u.function_space.mesh - function_data = create_function_data_on_original_mesh(u) + function_data = create_function_data_on_original_mesh(u, name) fname = Path(filename) write_function( fname, From 4046323ea9e4e6b027cbb8b20c3e35dfe338b272 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Wed, 6 Mar 2024 15:38:31 +0100 Subject: [PATCH 30/49] Draft for JOSS (#90) * Add first draft of joss paper * Modify CI * Remove API specific text * Add sentence regarding mpi collectives in summary * Move MPI collectives up * Another readthrough * Some doi fixes and to author list and cursive --- .github/workflows/build_joss_paper.yml | 22 +++++++ joss-paper/README.md | 6 ++ joss-paper/joss-checklist.md | 36 +++++++++++ joss-paper/paper.bib | 78 +++++++++++++++++++++++ joss-paper/paper.md | 87 ++++++++++++++++++++++++++ 5 files changed, 229 insertions(+) create mode 100644 .github/workflows/build_joss_paper.yml create mode 100644 joss-paper/README.md create mode 100644 joss-paper/joss-checklist.md create mode 100644 joss-paper/paper.bib create mode 100644 joss-paper/paper.md diff --git a/.github/workflows/build_joss_paper.yml b/.github/workflows/build_joss_paper.yml new file mode 100644 index 0000000..d650b1f --- /dev/null +++ b/.github/workflows/build_joss_paper.yml @@ -0,0 +1,22 @@ +name: Build JOSS paper +on: [push] + +jobs: + paper: + runs-on: ubuntu-latest + name: Paper Draft + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Build draft PDF + uses: openjournals/openjournals-draft-action@master + with: + journal: joss + paper-path: ./joss-paper/paper.md + + - name: Upload + uses: actions/upload-artifact@v4 + with: + name: paper + path: ./joss-paper/paper.pdf \ No newline at end of file diff --git a/joss-paper/README.md b/joss-paper/README.md new file mode 100644 index 0000000..6caa561 --- /dev/null +++ b/joss-paper/README.md @@ -0,0 +1,6 @@ +# How to generate paper + +```python + docker run --rm --volume $(pwd):/data --user $(id -u):$(id -g) --env JOURNAL=joss openjournals/inara +``` + diff --git a/joss-paper/joss-checklist.md b/joss-paper/joss-checklist.md new file mode 100644 index 0000000..e978b17 --- /dev/null +++ b/joss-paper/joss-checklist.md @@ -0,0 +1,36 @@ +### Conflict of interest + +- [x] I confirm that I have read the [JOSS conflict of interest policy](https://joss.readthedocs.io/en/latest/submitting.html#conflict-of-interest-policy-for-authors) and that: I have no COIs with reviewing this work or that any perceived COIs have been waived by JOSS for the purpose of this review. + +### Code of Conduct + +- [x] I confirm that I read and will adhere to the [JOSS code of conduct](https://joss.theoj.org/about#code_of_conduct). + +### General checks + +- [x] **Repository:** Is the source code for this software available at the repository url? +- [x] **License:** Does the repository contain a plain-text LICENSE file with the contents of an [OSI approved](https://opensource.org/licenses/alphabetical) software license? +- [x] **Contribution and authorship:** Has the submitting author made major contributions to the software? Does the full list of paper authors seem appropriate and complete? + +### Functionality + +- [x] **Installation:** Does installation proceed as outlined in the documentation? +- [x] **Functionality:** Have the functional claims of the software been confirmed? +- [ ] **Performance:** If there are any performance claims of the software, have they been confirmed? (If there are no claims, please check off this item.) 
+ +### Documentation + +- [x] **A statement of need:** Do the authors clearly state what problems the software is designed to solve and who the target audience is? +- [x] **Installation instructions:** Is there a clearly-stated list of dependencies? Ideally these should be handled with an automated package management solution. +- [x] **Example usage:** Do the authors include examples of how to use the software (ideally to solve real-world analysis problems). +- [x] **Functionality documentation:** Is the core functionality of the software documented to a satisfactory level (e.g., API method documentation)? +- [x] **Automated tests:** Are there automated tests or manual steps described so that the functionality of the software can be verified? +- [x] **Community guidelines:** Are there clear guidelines for third parties wishing to 1) Contribute to the software 2) Report issues or problems with the software 3) Seek support + +### Software paper + +- [x] **Summary:** Has a clear description of the high-level functionality and purpose of the software for a diverse, non-specialist audience been provided? +- [x] **A statement of need:** Does the paper have a section titled 'Statement of need' that clearly states what problems the software is designed to solve, who the target audience is, and its relation to other work? +- [x] **State of the field:** Do the authors describe how this software compares to other commonly-used packages? +- [x] **Quality of writing:** Is the paper well written (i.e., it does not require editing for structure, language, or writing quality)? +- [x] **References:** Is the list of references complete, and is everything cited appropriately that should be cited (e.g., papers, datasets, software)? Do references in the text use the proper [citation syntax]( https://rmarkdown.rstudio.com/authoring_bibliographies_and_citations.html#citation_syntax)? diff --git a/joss-paper/paper.bib b/joss-paper/paper.bib new file mode 100644 index 0000000..c079b46 --- /dev/null +++ b/joss-paper/paper.bib @@ -0,0 +1,78 @@ +@unpublished{Baratta:2023, + author = {Baratta, Igor A. and Dean, Joseph P. and Dokken, Jørgen S. and Habera, Michal and Hale, Jack and Richardson, Chris N. and Rognes, Marie E. and Scroggs, Matthew W. and Sime, Nathan and Wells, Garth N.}, + title = {DOLFINx: The next generation FEniCS problem solving environment}, + language = {English}, + year = {2023}, + doi = {10.5281/zenodo.10447666} +} + + +@article{Godoy:2020, + title = {ADIOS 2: The Adaptable Input Output System. A framework for high-performance data management}, + journal = {SoftwareX}, + volume = {12}, + pages = {100561}, + year = {2020}, + issn = {2352-7110}, + doi = {10.1016/j.softx.2020.100561}, + author = {William F. Godoy and Norbert Podhorszki and Ruonan Wang and Chuck Atkins and Greg Eisenhauer and Junmin Gu and Philip Davis and Jong Choi and Kai Germaschewski and Kevin Huck and Axel Huebl and Mark Kim and James Kress and Tahsin Kurc and Qing Liu and Jeremy Logan and Kshitij Mehta and George Ostrouchov and Manish Parashar and Franz Poeschel and David Pugmire and Eric Suchyta and Keichi Takahashi and Nick Thompson and Seiji Tsutsumi and Lipeng Wan and Matthew Wolf and Kesheng Wu and Scott Klasky} +} + + +@conference{Habera:2018, + author = {Habera, Michal and Zilian, Andreas and Hale, Jack and Richardson, Chris N. 
and Blechta, Jan and Dave, Demarle},
+  year = {2018},
+  title = {{XDMF and ParaView: checkpointing format}},
+  booktitle = {{FEniCS Confernce 2018: Book of Abstracts}},
+  url = {https://hdl.handle.net/10993/35848}
+}
+
+@misc{Ham:2024,
+  title = {Efficient N-to-M Checkpointing Algorithm for Finite Element Simulations},
+  author = {David A. Ham and Vaclav Hapla and Matthew G. Knepley and Lawrence Mitchell and Koki Sagiyama},
+  year = {2024},
+  eprint = {2401.05868},
+  archiveprefix = {arXiv},
+  doi = {10.48550/arXiv.2401.05868}
+}
+
+
+@article{Rathgeber:2016,
+  author = {Rathgeber, Florian and Ham, David A. and Mitchell, Lawrence and Lange, Michael and Luporini, Fabio and Mcrae, Andrew T. T. and Bercea, Gheorghe-Teodor and Markall, Graham R. and Kelly, Paul H. J.},
+  title = {Firedrake: Automating the Finite Element Method by Composing Abstractions},
+  year = {2016},
+  issue_date = {September 2017},
+  publisher = {Association for Computing Machinery},
+  address = {New York, NY, USA},
+  volume = {43},
+  number = {3},
+  issn = {0098-3500},
+  doi = {10.1145/2998441},
+  journal = {ACM Trans. Math. Softw.},
+  month = {dec},
+  articleno = {24},
+  numpages = {27}
+}
+
+@article{Scroggs:2022,
+  author = {Scroggs, Matthew W. and Dokken, J\o{}rgen S. and Richardson, Chris N. and Wells, Garth N.},
+  title = {Construction of Arbitrary Order Finite Element Degree-of-Freedom Maps on Polygonal and Polyhedral Cell Meshes},
+  year = {2022},
+  publisher = {Association for Computing Machinery},
+  address = {New York, NY, USA},
+  volume = {48},
+  number = {2},
+  issn = {0098-3500},
+  doi = {10.1145/3524456},
+  journal = {ACM Trans. Math. Softw.},
+  month = {may},
+  articleno = {18},
+  numpages = {23}
+}
+
+@misc{MPI-Forum:2012,
+  author = {MPI-Forum},
+  year = {2012},
+  title = {{MPI: A Message-Passing Interface Standard. Version 3.0}},
+  url = {https://www.mpi-forum.org/docs/mpi-3.0/mpi30-report.pdf}
+}
\ No newline at end of file
diff --git a/joss-paper/paper.md b/joss-paper/paper.md
new file mode 100644
index 0000000..b23e3b2
--- /dev/null
+++ b/joss-paper/paper.md
@@ -0,0 +1,87 @@
+---
+title: 'ADIOS4DOLFINx: A framework for checkpointing in FEniCS'
+tags:
+  - Python
+  - finite element simulations
+  - checkpointing
+authors:
+  - name: Jørgen Schartum Dokken
+    orcid: 0000-0001-6489-8858
+    corresponding: true
+    affiliation: 1
+affiliations:
+  - name: Simula Research Laboratory
+    index: 1
+date: 6 March 2024
+bibliography: paper.bib
+
+---
+
+# Summary
+
+We introduce a checkpointing framework for the latest version of the FEniCS project, known as DOLFINx.
+The framework leverages the data-centric approach of DOLFINx along with a state-of-the-art adaptable Input/Output system called ADIOS2.
+Several variations of checkpointing are supported, including *N-to-M* checkpointing of function data, storage of mesh partitioning information for *N-to-N* checkpointing and snapshot checkpointing for RAM reduction during simulation.
+All MPI operations use MPI-3 Neighborhood collectives.
+
+# Statement of need
+
+The ability to start, stop and resume simulations is becoming increasingly important with the growing use of supercomputers for solving scientific and engineering problems.
+A rising number of large-scale problems are deployed on high-performance, memory-distributed computing systems and users tend to run more demanding simulations.
+These simulations are often non-linear and time-dependent, and typically amount to thousands of CPU hours.
+As inspecting intermediate results might uncover bugs and unphysical solutions, the ability to run parts of the simulation, inspect the result and then resume the simulation becomes a key factor in enabling efficient development.
+If such problems are discovered early on, the simulation can be terminated, saving the developer time, money and energy.
+
+The proposed framework enables users of the FEniCS project [@Baratta:2023] to store solutions during simulation, and read them in at their convenience to resume simulations at a later stage.
+Several checkpointing methods are implemented, including *N-to-M* checkpointing, which means saving data from a program executed with N processes, and loading it back in on M processes.
+
+Functionality for *N-to-M* checkpointing was implemented for the old version of DOLFIN by [@Habera:2018].
+However, this functionality is not present in the newest version of the FEniCS Project [@Baratta:2023].
+The storage principles in ADIOS4DOLFINx are based on the ideas present in this implementation.
+However, the implementation for non-Lagrangian finite element spaces vastly differs, due to the usage of dof-permutations [@Scroggs:2022].
+Additionally, all global MPI-calls in the old implementation have been reimplemented with scalable MPI-communication using the MPI-3 Neighborhood Collectives [@MPI-Forum:2012].
+
+The framework introduces several new methods for storing partitioning information for *N-to-N* checkpointing with arbitrary ghosting, as well as very lightweight snapshot checkpoints.
+A similar framework for *N-to-M* checkpointing was implemented by [@Ham:2024] for the finite element framework Firedrake [@Rathgeber:2016].
+This framework differs from the one used in ADIOS4DOLFINx in several ways due to the different internal structures of DOLFINx and Firedrake.
+
+# Functionality
+
+The software is written as a Python extension to DOLFINx, which can be installed using the Python package installer `pip`, either directly from the GitHub repository or from the [ADIOS4DOLFINx](https://pypi.org/project/adios4dolfinx/) package on the Python Package Index.
+The following features are supported:
+
+- Snapshot checkpointing
+- *N-to-M* checkpointing with mesh storage
+- *N-to-M* checkpointing without mesh storage
+- *N-to-N* checkpointing storing partitioning information
+
+A *snapshot checkpoint* is a checkpoint that is only valid during the run of a simulation.
+It is lightweight (only stores the local portion of the global dof array to file), and is stored using the *Local Array* feature in ADIOS2 [@Godoy:2020], which stores data local to each MPI process.
+This feature is intended for use-cases where many solutions have to be aggregated at the end of a simulation for some post-processing step, or as a fall-back mechanism when restarting a diverging iterative solver.
+
+An *N-to-M* checkpoint is a checkpoint that can be written with N processes and read back in with M processes.
+Two versions of this checkpoint are supported: one where the mesh is stored together with the function data, and one without mesh storage.
+The reasoning for such a split is that when a mesh is read into DOLFINx and passed to an appropriate partitioner, the ordering of the mesh nodes (coordinates) and connectivity (cells) is changed.
+Writing these back into *global arrays* requires MPI communication to ensure contiguous writing of data.
+
+The *N-to-M* checkpoint with mesh storage exclusively writes contiguous chunks of data owned by the current process to an ADIOS2 *Global Array* that can be read in with a different number of processes at a later stage.
+This operation requires no MPI-communication.
+
+In many cases, the input mesh might stem from an external mesh generator and is stored together with mesh entity markers in an external file, for instance an XDMF-file.
+To avoid duplication of this mesh data, a stand-alone file that can be associated with the XDMF file for a later restart can be created.
+This method requires some MPI neighborhood collective calls to move data from the process that currently owns it to the process responsible for storing it as a *Global Array* in contiguous chunks.
+Both *N-to-M* checkpoint routines use the same API to read in checkpoints at a later stage.
+
+In certain scenarios, mesh partitioning might be time-consuming, for instance when a developer runs the same problem over and over again with the same number of processes.
+As DOLFINx supports custom partitioning [@Baratta:2023], we use this feature to read in partition data from a previous run.
+As opposed to the checkpoints in the old version of DOLFIN, these checkpoints handle any ghosting, be it custom ghosting provided by the user or the shared-facet mode provided by DOLFINx.
+
+# Examples
+A large variety of examples covering all the functions in adios4dolfinx is available at [https://jorgensd.github.io/adios4dolfinx](https://jorgensd.github.io/adios4dolfinx).
+
+# Acknowledgements
+
+We acknowledge the valuable feedback on the documentation and manuscript by Thomas M. Surowiec and Halvor Herlyng.
+Additionally, we acknowledge the scientific discussion regarding feature development and code contributions by Henrik N. Finsberg and Francesco Ballarin.
+
+# References
\ No newline at end of file
From db2933f6735566fcc9f33dc342abb665555b5e68 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Wed, 6 Mar 2024 19:15:21 +0100
Subject: [PATCH 31/49] Add nate to acknowledgements (#91)

---
 joss-paper/paper.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/joss-paper/paper.md b/joss-paper/paper.md
index b23e3b2..6cfb230 100644
--- a/joss-paper/paper.md
+++ b/joss-paper/paper.md
@@ -82,6 +82,6 @@ A large variety of examples covering all the functions in adios4dolfinx is avail
 # Acknowledgements
 
 We acknowledge the valuable feedback on the documentation and manuscript by Thomas M. Surowiec and Halvor Herlyng.
-Additionally, we acknowledge the scientific discussion regarding feature development and code contributions by Henrik N. Finsberg and Francesco Ballarin.
+Additionally, we acknowledge the scientific discussion regarding feature development and code contributions by Francesco Ballarin, Henrik N. Finsberg and Nathan Sime.
 
 # References
\ No newline at end of file
From a0e4f5ffd930bc244ffca72fbc75f477bc3d1e93 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Thu, 7 Mar 2024 10:00:37 +0100
Subject: [PATCH 32/49] Remove typo in reference and use full journal names (#93)

---
 joss-paper/paper.bib | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/joss-paper/paper.bib b/joss-paper/paper.bib
index c079b46..51c652e 100644
--- a/joss-paper/paper.bib
+++ b/joss-paper/paper.bib
@@ -23,7 +23,7 @@ @conference{Habera:2018
and Blechta, Jan and Dave, Demarle}, year = {2018}, title = {{XDMF and ParaView: checkpointing format}}, - booktitle = {{FEniCS Confernce 2018: Book of Abstracts}}, + booktitle = {{FEniCS Conference 2018: Book of Abstracts}}, url = {https://hdl.handle.net/10993/35848} } @@ -48,7 +48,7 @@ @article{Rathgeber:2016 number = {3}, issn = {0098-3500}, doi = {10.1145/2998441}, - journal = {ACM Trans. Math. Softw.}, + journal = {ACM Transactions on Mathematical Software}, month = {dec}, articleno = {24}, numpages = {27} @@ -64,7 +64,7 @@ @article{Scroggs:2022 number = {2}, issn = {0098-3500}, doi = {10.1145/3524456}, - journal = {ACM Trans. Math. Softw.}, + journal = {ACM Transactions on Mathematical Software}, month = {may}, articleno = {18}, numpages = {23} From 9a677fefd1d5e89f6a926122929d5cd464cf22eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Sat, 9 Mar 2024 08:52:40 +0100 Subject: [PATCH 33/49] Remove expensive python loop for dof transformations by using new python binding in DOLFINx (#92) * Remove expensive python loop for dof transformations by moving it to DOLFINx C++ interface, see: https://github.com/FEniCS/dolfinx/pull/3090 * Ruff formatting * Slightly improve inverted permutation as we know we are using integer indices and a unique set of indices. Move import to top of file. * Apply suggestions from code review --- .github/workflows/test_package_openmpi.yml | 1 + src/adios4dolfinx/checkpointing.py | 41 ++++++++++++++++------ 2 files changed, 31 insertions(+), 11 deletions(-) diff --git a/.github/workflows/test_package_openmpi.yml b/.github/workflows/test_package_openmpi.yml index a65c412..ec8d143 100644 --- a/.github/workflows/test_package_openmpi.yml +++ b/.github/workflows/test_package_openmpi.yml @@ -52,6 +52,7 @@ jobs: with: adios2: ${{ matrix.adios2 }} petsc_arch: ${{ env.PETSC_ARCH }} + dolfinx: main - name: Download legacy data uses: actions/download-artifact@v4 diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py index 9bf2c62..3e1f8fb 100644 --- a/src/adios4dolfinx/checkpointing.py +++ b/src/adios4dolfinx/checkpointing.py @@ -30,7 +30,13 @@ send_dofs_and_recv_values, ) from .structures import FunctionData, MeshData -from .utils import compute_dofmap_pos, compute_local_range, index_owner, unroll_dofmap +from .utils import ( + compute_dofmap_pos, + compute_local_range, + index_owner, + unroll_dofmap, + unroll_insert_position, +) from .writers import write_function as _internal_function_writer from .writers import write_mesh as _internal_mesh_writer @@ -376,18 +382,31 @@ def read_function( input_perms = read_cell_perms( adios, comm, filename, "CellPermutations", num_cells_global, engine ) + # Start by sorting data array by cell permutation + num_dofs_per_cell = input_dofmap.offsets[1:] - input_dofmap.offsets[:-1] + assert np.allclose(num_dofs_per_cell, num_dofs_per_cell[0]) + + # Sort dofmap by input local cell index + input_perms_sorted = input_perms[input_local_cell_index] + unrolled_dofmap_position = unroll_insert_position( + input_local_cell_index, num_dofs_per_cell[0] + ) + dofmap_sorted_by_input = recv_array[unrolled_dofmap_position] # First invert input data to reference element then transform to current mesh - for i, l_cell in enumerate(input_local_cell_index): - start, end = input_dofmap.offsets[l_cell : l_cell + 2] - # FIXME: Tempoary cast uint32 to integer as transformations - # doesn't support uint32 with the switch to nanobind - element.pre_apply_transpose_dof_transformation( - 
recv_array[int(start) : int(end)], int(input_perms[l_cell]), bs
-        )
-        element.pre_apply_inverse_transpose_dof_transformation(
-            recv_array[int(start) : int(end)], int(inc_perms[i]), bs
-        )
+    element.pre_apply_transpose_dof_transformation(
+        dofmap_sorted_by_input, input_perms_sorted, bs
+    )
+    element.pre_apply_inverse_transpose_dof_transformation(
+        dofmap_sorted_by_input, inc_perms, bs
+    )
+    # Compute invert permutation
+    inverted_perm = np.empty_like(unrolled_dofmap_position)
+    inverted_perm[unrolled_dofmap_position] = np.arange(
+        len(unrolled_dofmap_position), dtype=inverted_perm.dtype
+    )
+    recv_array = dofmap_sorted_by_input[inverted_perm]
+
     # ------------------Step 6----------------------------------------
     # For each dof owned by a process, find the local position in the dofmap.
     V = u.function_space
From 45203a491f06dacc1c52e173819bd9a521aaa7cd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Sat, 9 Mar 2024 09:44:23 +0100
Subject: [PATCH 34/49] Update readme and remove flake8 (#94)

* Remove expensive python loop for dof transformations by moving it to
 DOLFINx C++ interface, see: https://github.com/FEniCS/dolfinx/pull/3090

* Ruff formatting

* Slightly improve inverted permutation as we know we are using integer
 indices and a unique set of indices. Move import to top of file.

* Remove flake8 and add more info to readme

* Move contributing guidelines

* Add JOSS in review badge
---
 .flake8         |  4 ---
 CONTRIBUTING.md | 42 +++++++++++++++++++++++++
 README.md       | 84 +++++++++++++++++++------------------------------
 3 files changed, 74 insertions(+), 56 deletions(-)
 delete mode 100644 .flake8
 create mode 100644 CONTRIBUTING.md

diff --git a/.flake8 b/.flake8
deleted file mode 100644
index e4a7afb..0000000
--- a/.flake8
+++ /dev/null
@@ -1,4 +0,0 @@
-# flake8 does not support pyproject.toml, see:
-# https://github.com/PyCQA/flake8/issues/234
-[flake8]
-max-line-length = 120
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..5c64d50
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,42 @@
+# Contributor guidelines
+When contributing to this repository, please first [create an issue](https://github.com/jorgensd/adios4dolfinx/issues/new/choose) containing information about the missing feature or the bug that you would like to fix. Here you can discuss the change you want to make with the maintainers of the repository.
+
+Please note we have a code of conduct; please follow it in all your interactions with the project.
+
+## New contributor guide
+
+To get an overview of the project, read the [documentation](https://jorgensd.github.io/adios4dolfinx). Here are some resources to help you get started with open source contributions:
+
+- [Finding ways to contribute to open source on GitHub](https://docs.github.com/en/get-started/exploring-projects-on-github/finding-ways-to-contribute-to-open-source-on-github)
+- [Set up Git](https://docs.github.com/en/get-started/quickstart/set-up-git)
+- [GitHub flow](https://docs.github.com/en/get-started/quickstart/github-flow)
+- [Collaborating with pull requests](https://docs.github.com/en/github/collaborating-with-pull-requests)
+
+## Pull Request Process
+
+
+### Pull Request
+
+- When you're finished with the changes, create a pull request, also known as a PR. It is also OK to create a [draft pull request](https://github.blog/2019-02-14-introducing-draft-pull-requests/) from the very beginning. Once you are done you can click on the "Ready for review" button.
You can also [request a review](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/requesting-a-pull-request-review) from one of the maintainers.
+- Don't forget to [link PR to the issue that you opened](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue).
+- Enable the checkbox to [allow maintainer edits](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork) so the branch can be updated for a merge.
+Once you submit your PR, a team member will review your proposal. We may ask questions or request additional information.
+- We may ask for changes to be made before a PR can be merged, either using [suggested changes](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/incorporating-feedback-in-your-pull-request) or pull request comments. You can apply suggested changes directly through the UI. You can make any other changes in your fork, then commit them to your branch.
+- As you update your PR and apply changes, mark each conversation as [resolved](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/commenting-on-a-pull-request#resolving-conversations).
+- If you run into any merge issues, check out this [git tutorial](https://lab.github.com/githubtraining/managing-merge-conflicts) to help you resolve merge conflicts and other issues.
+- Please make sure that all tests are passing, GitHub Pages renders nicely, and code coverage is not lower than before your contribution. You can see the different GitHub Actions workflows by clicking the "Actions" tab in the GitHub repository.
+
+Note that for a pull request to be accepted, it has to pass all the tests on CI, which includes:
+- `mypy`: type checking
+- `ruff`: Code formatting
+- `pytest`: Successful execution of all tests in the `tests` folder.
+
+
+### Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
diff --git a/README.md b/README.md
index 8c0d451..f33153a 100644
--- a/README.md
+++ b/README.md
@@ -1,34 +1,57 @@
-# ADIOS2Wrappers for DOLFINx
+# ADIOS4DOLFINx - A framework for checkpointing in DOLFINx
 
-[![MIT](https://img.shields.io/github/license/jorgensd/adios4dolfinx)](LICENSE)
-[Read Latest Documentation](https://jsdokken.com/adios4dolfinx/)
+![MIT](https://img.shields.io/github/license/jorgensd/adios4dolfinx)
+[![status](https://joss.theoj.org/papers/7866cb142db8a803e32d79a109573d25/status.svg)](https://joss.theoj.org/papers/7866cb142db8a803e32d79a109573d25)
 
-This is an extension for [DOLFINx](https://github.com/FEniCS/dolfinx/) to checkpoint meshes, meshtags and functions using [ADIOS2](https://adios2.readthedocs.io/en/latest/).
-The code uses the adios2 Python-wrappers to write DOLFINx objects to file, supporting N-to-M (_recoverable_) and N-to-N (_snapshot_) checkpointing.
-See: [Checkpointing in DOLFINx - FEniCS 23](https://jsdokken.com/checkpointing-presentation/#/) for more information.
+ADIOS4DOLFINx is an extension for [DOLFINx](https://github.com/FEniCS/dolfinx/) to checkpoint meshes, meshtags and functions using [ADIOS 2](https://adios2.readthedocs.io/en/latest/).
+
+The code uses the ADIOS2 Python-wrappers to write DOLFINx objects to file, supporting N-to-M (_recoverable_) and N-to-N (_snapshot_) checkpointing.
+See: [Checkpointing in DOLFINx - FEniCS 23](https://jsdokken.com/checkpointing-presentation/#/) or the examples in thee [Documentation](https://jsdokken.com/adios4dolfinx/) for more information.
 
 For scalability, the code uses [MPI Neighbourhood collectives](https://www.mpi-forum.org/docs/mpi-3.1/mpi31-report/node200.htm) for communication across processes.
 
 ## Installation
+Compatibility with DOLFINx:
+- ADIOS4DOLFINx v0.7.3 is compatible with DOLFINx v0.7.x
+- ADIOS4DOLFINx v0.8.x is compatible with the main branch of DOLFINx
 
 ### Docker
-ADIOS2 is installed in the official DOLFINx containers.
+ADIOS2 is installed in the official DOLFINx containers, and thus there are no additional dependencies required to install `adios4dolfinx`
+on top of DOLFINx in these images.
+Create a Docker container, named for instance `dolfinx-checkpoint`.
+Use the `nightly` tag to get the main branch of DOLFINx, or `stable` to get the latest stable release.
 
 ```bash
 docker run -ti -v $(pwd):/root/shared -w /root/shared --name=dolfinx-checkpoint ghcr.io/fenics/dolfinx/dolfinx:nightly
 ```
+For the latest version compatible with nightly, use
+```bash
+python3 -m pip install git+https://github.com/jorgensd/adios4dolfinx@main
+```
+If you are using the `stable` image, you can install `adios4dolfinx` from [PYPI](https://pypi.org/project/adios4dolfinx/) with
+```bash
+python3 -m pip install adios4dolfinx
+```
+
+This docker container can be opened with
+```bash
+docker container start -i dolfinx-checkpoint
+```
+at a later time.
 
 ### Conda
-To use with conda (DOLFINx release v0.7.0 works with v0.7.3 of ADIOS4DOLFINx)
+> [!NOTE]
+> Conda supports the stable release of DOLFINx, and thus the appropriate version should be installed; see the section above for more details.
 
+Following is a minimal recipe of how to install adios4dolfinx, given that you have conda installed on your system.
 ```bash
 conda create -n dolfinx-checkpoint python=3.10
 conda activate dolfinx-checkpoint
 conda install -c conda-forge fenics-dolfinx pip adios2
-python3 -m pip install git+https://github.com/jorgensd/adios4dolfinx@v0.7.2
+python3 -m pip install git+https://github.com/jorgensd/adios4dolfinx@v0.7.3
 ```
 
 ## Functionality
@@ -68,46 +91,3 @@ See the [API](./docs/api) for more information.
 
 ## Long term plan
 The long term plan is to get this library merged into DOLFINx (rewritten in C++ with appropriate Python-bindings).
-
-# Contributor guidelines
-When contributing to this repository, please first [create an issue](https://github.com/jorgensd/adios4dolfinx/issues/new/choose) containing information about the missing feature or the bug that you would like to fix. Here you can discuss the change you want to make with the maintainers of the repository.
-
-Please note we have a code of conduct, please follow it in all your interactions with the project.
-
-## New contributor guide
-
-To get an overview of the project, read the [documentation](https://jorgensd.github.io/adios4dolfinx).
Here are some resources to help you get started with open source contributions: - -- [Finding ways to contribute to open source on GitHub](https://docs.github.com/en/get-started/exploring-projects-on-github/finding-ways-to-contribute-to-open-source-on-github) -- [Set up Git](https://docs.github.com/en/get-started/quickstart/set-up-git) -- [GitHub flow](https://docs.github.com/en/get-started/quickstart/github-flow) -- [Collaborating with pull requests](https://docs.github.com/en/github/collaborating-with-pull-requests) - -## Pull Request Process - - -### Pull Request - -- When you're finished with the changes, create a pull request, also known as a PR. It is also OK to create a [draft pull request](https://github.blog/2019-02-14-introducing-draft-pull-requests/) from the very beginning. Once you are done you can click on the ["Ready for review"] button. You can also [request a review](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/requesting-a-pull-request-review) from one of the maintainers. -- Don't forget to [link PR to the issue that you opened ](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue). -- Enable the checkbox to [allow maintainer edits](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork) so the branch can be updated for a merge. -Once you submit your PR, a team member will review your proposal. We may ask questions or request for additional information. -- We may ask for changes to be made before a PR can be merged, either using [suggested changes](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/incorporating-feedback-in-your-pull-request) or pull request comments. You can apply suggested changes directly through the UI. You can make any other changes in your fork, then commit them to your branch. -- As you update your PR and apply changes, mark each conversation as [resolved](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/commenting-on-a-pull-request#resolving-conversations). -- If you run into any merge issues, checkout this [git tutorial](https://lab.github.com/githubtraining/managing-merge-conflicts) to help you resolve merge conflicts and other issues. -- Please make sure that all tests are passing, github pages renders nicely, and code coverage are are not lower than before your contribution. You see the different github action workflows by clicking the "Action" tab in the GitHub repository. - -Note that for a pull request to be accepted, it has to pass all the tests on CI, which includes: -- `mypy`: typechecking -- `ruff`: Code formatting -- `pytest`: Successfull execution of all tests in the `tests` folder. - - -### Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
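The README revision above documents the `read/write_mesh` and `read/write_function` API without showing the calls in context. The following is a minimal sketch of an N-to-M checkpoint round trip, not a verbatim project example: the file name is illustrative, and the filename-first argument order (matching `read_mesh` in the patches above) is an assumption, since older releases took the DOLFINx object first; consult the API documentation of the installed version.

```python
# Minimal sketch of an adios4dolfinx checkpoint round trip.
# Assumptions: filename-first argument order (as in read_mesh above);
# "checkpoint.bp" is an illustrative output path.
from mpi4py import MPI
import dolfinx
import adios4dolfinx

filename = "checkpoint.bp"

# Write phase: may run on N processes.
mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 8, 8)
V = dolfinx.fem.functionspace(mesh, ("Lagrange", 2))
u = dolfinx.fem.Function(V)
u.interpolate(lambda x: x[0] + x[1])  # any initial state
adios4dolfinx.write_mesh(filename, mesh)
adios4dolfinx.write_function(filename, u)

# Read phase: may run on M processes, typically in a separate program.
mesh_in = adios4dolfinx.read_mesh(filename, MPI.COMM_WORLD)
V_in = dolfinx.fem.functionspace(mesh_in, ("Lagrange", 2))
u_in = dolfinx.fem.Function(V_in)
adios4dolfinx.read_function(filename, u_in)
```

The read phase rebuilds the function space with the same element signature before filling it from file, which is what makes the checkpoint recoverable on a different process count.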
From 03718655b9a6ec7af189d807615c25169253fff6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Mon, 25 Mar 2024 11:08:31 +0100 Subject: [PATCH 35/49] Update README.md conda installation instructions (#96) --- README.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index f33153a..7b9c9d6 100644 --- a/README.md +++ b/README.md @@ -51,9 +51,14 @@ Following is a minimal recipe of how to install adios4dolfinx, given that you ha conda create -n dolfinx-checkpoint python=3.10 conda activate dolfinx-checkpoint conda install -c conda-forge fenics-dolfinx pip adios2 -python3 -m pip install git+https://github.com/jorgensd/adios4dolfinx@v0.7.3 +python3 -m pip install adios4dolfinx[test]@git+https://github.com/jorgensd/adios4dolfinx@v0.7.3 ``` +> [!NOTE] +> To run the tests or demos associated with the code, install `ipyparallel` in your environment, for instance by calling +> ```bash +> python3 -m pip install adios4dolfinx[test]@git+https://github.com/jorgensd/adios4dolfinx@v0.7.3 +> ``` ## Functionality ### DOLFINx From ca49b426e217b9271ff8216f9f9dcb8d68546f98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Mon, 25 Mar 2024 11:22:45 +0100 Subject: [PATCH 36/49] Update README.md (#97) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 7b9c9d6..3c8974f 100644 --- a/README.md +++ b/README.md @@ -26,13 +26,13 @@ Use the `nightly` tag to get the main branch of DOLFINx, or `stable` to get the ```bash docker run -ti -v $(pwd):/root/shared -w /root/shared --name=dolfinx-checkpoint ghcr.io/fenics/dolfinx/dolfinx:nightly ``` -For the latest version compatible with nightly, use +For the latest version compatible with nightly (with the ability to run the test suite), use ```bash -python3 -m pip install git+https://github.com/jorgensd/adios4dolfinx@main +python3 -m pip install adios4dolfinx[test]@git+https://github.com/jorgensd/adios4dolfinx@main ``` If you are using the `stable` image, you can install `adios4dolfinx` from [PYPI](https://pypi.org/project/adios4dolfinx/) with ```bash -python3 -m pip install adios4dolfinx +python3 -m pip install adios4dolfinx[test] ``` This docker container can be opened with From abe405b1784499fa3f7bcb29ca8f7f10e95ba7e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Tue, 9 Apr 2024 14:44:18 +0200 Subject: [PATCH 37/49] Switch adios2 test to release (#98) --- .github/workflows/test_package_openmpi.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/test_package_openmpi.yml b/.github/workflows/test_package_openmpi.yml index ec8d143..4d9c102 100644 --- a/.github/workflows/test_package_openmpi.yml +++ b/.github/workflows/test_package_openmpi.yml @@ -17,7 +17,6 @@ on: - cron: "0 8 * * *" jobs: - create-datasets: uses: ./.github/workflows/create_legacy_data.yml with: @@ -40,7 +39,7 @@ jobs: strategy: matrix: - adios2: ["default", "v2.10.0-rc1"] + adios2: ["default", "v2.10.0"] steps: - uses: actions/checkout@v4 From c76cccde6c379fcfa3b87ea65d4c726cb3b881f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Tue, 9 Apr 2024 16:28:50 +0200 Subject: [PATCH 38/49] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3c8974f..8e1eb29 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ ADIOS4DOLFINx is an extension for 
[DOLFINx](https://github.com/FEniCS/dolfinx/) to checkpoint meshes, meshtags and functions using [ADIOS 2](https://adios2.readthedocs.io/en/latest/).
 
 The code uses the ADIOS2 Python-wrappers to write DOLFINx objects to file, supporting N-to-M (_recoverable_) and N-to-N (_snapshot_) checkpointing.
-See: [Checkpointing in DOLFINx - FEniCS 23](https://jsdokken.com/checkpointing-presentation/#/) or the examples in thee [Documentation](https://jsdokken.com/adios4dolfinx/) for more information.
+See: [Checkpointing in DOLFINx - FEniCS 23](https://jsdokken.com/checkpointing-presentation/#/) or the examples in the [Documentation](https://jsdokken.com/adios4dolfinx/) for more information.
 
 For scalability, the code uses [MPI Neighbourhood collectives](https://www.mpi-forum.org/docs/mpi-3.1/mpi31-report/node200.htm) for communication across processes.
 
From 5546cedbd4979c3602fe1f875ab55e7350578d58 Mon Sep 17 00:00:00 2001
From: Nate <34454754+nate-sime@users.noreply.github.com>
Date: Sun, 14 Apr 2024 01:23:21 -0700
Subject: [PATCH 39/49] Refactor mesh creation from reading data (#99)

* refactor mesh creation from reading data

---------

Co-authored-by: nate-sime <>
---
 src/adios4dolfinx/checkpointing.py | 50 +++++++++++++++++++++++++++---
 1 file changed, 45 insertions(+), 5 deletions(-)

diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py
index 3e1f8fb..ba48d72 100644
--- a/src/adios4dolfinx/checkpointing.py
+++ b/src/adios4dolfinx/checkpointing.py
@@ -6,6 +6,7 @@
 
 from __future__ import annotations
 
+import typing
 from pathlib import Path
 
 from mpi4py import MPI
@@ -43,6 +44,7 @@
 adios2 = resolve_adios_scope(adios2)
 
 __all__ = [
+    "read_mesh_data",
     "read_mesh",
     "write_function",
     "read_function",
@@ -438,7 +440,7 @@ def read_function(
     u.x.scatter_forward()
 
 
-def read_mesh(
+def read_mesh_data(
     filename: Path | str,
     comm: MPI.Intracomm,
     engine: str = "BP4",
@@ -446,9 +448,9 @@
     time: float = 0.0,
     legacy: bool = False,
     read_from_partition: bool = False,
-) -> dolfinx.mesh.Mesh:
+) -> tuple[np.ndarray, np.ndarray, ufl.Mesh, typing.Callable]:
     """
-    Read an ADIOS2 mesh into DOLFINx.
+    Read ADIOS2 mesh data for use with DOLFINx.
 
     Args:
         filename: Path to input file
@@ -460,7 +462,7 @@
         legacy: If checkpoint was made prior to time-dependent mesh-writer set to True
         read_from_partition: Read mesh with partition from file
     Returns:
-        The distributed mesh
+        The mesh topology, geometry, UFL domain and partition function
     """
 
     adios = adios2.ADIOS(comm)
@@ -571,7 +573,45 @@ def partitioner(comm: MPI.Intracomm, n, m, topo):
         else:
             partitioner = dolfinx.cpp.mesh.create_cell_partitioner(ghost_mode)
 
-    return dolfinx.mesh.create_mesh(comm, mesh_topology, mesh_geometry, domain, partitioner)
+    return mesh_topology, mesh_geometry, domain, partitioner
+
+
+def read_mesh(
+    filename: Path | str,
+    comm: MPI.Intracomm,
+    engine: str = "BP4",
+    ghost_mode: dolfinx.mesh.GhostMode = dolfinx.mesh.GhostMode.shared_facet,
+    time: float = 0.0,
+    legacy: bool = False,
+    read_from_partition: bool = False,
+) -> dolfinx.mesh.Mesh:
+    """
+    Read an ADIOS2 mesh into DOLFINx.
+
+    Args:
+        filename: Path to input file
+        comm: The MPI communicator to distribute the mesh over
+        engine: ADIOS engine to use for reading (BP4, BP5 or HDF5)
+        ghost_mode: Ghost mode to use for mesh. If `read_from_partition`
+            is set to `True`, this option is ignored.
+        time: Time stamp associated with mesh
+        legacy: If checkpoint was made prior to time-dependent mesh-writer set to True
+        read_from_partition: Read mesh with partition from file
+    Returns:
+        The distributed mesh
+    """
+    return dolfinx.mesh.create_mesh(
+        comm,
+        *read_mesh_data(
+            filename,
+            comm,
+            engine=engine,
+            ghost_mode=ghost_mode,
+            time=time,
+            legacy=legacy,
+            read_from_partition=read_from_partition,
+        ),
+    )
From 741bab1e2900821f6b8f8405f78a366b542600b1 Mon Sep 17 00:00:00 2001
From: jorgensd
Date: Sun, 14 Apr 2024 11:23:31 +0200
Subject: [PATCH 40/49] Make summary more readable for a non-specialist
 audience.

---
 joss-paper/paper.md | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/joss-paper/paper.md b/joss-paper/paper.md
index 6cfb230..3ee06bc 100644
--- a/joss-paper/paper.md
+++ b/joss-paper/paper.md
@@ -20,9 +20,12 @@ bibliography: paper.bib
 # Summary
 
 We introduce a checkpointing framework for the latest version of the FEniCS project, known as DOLFINx.
-The framework leverages the data-centric approach of DOLFINx along with a state of the art adaptable Input/Output system called ADIOS2.
-Several variations of checkpointing are supported, including *N-to-M* checkpointing of function data, storage of mesh partitioning information for *N-to-N* checkpointing and snapshot checkpointing for RAM reduction during simulation.
-All MPI operations are using MPI-3 Neighborhood collectives.
+DOLFINx is a general framework for solving partial differential equations using the finite element method.
+The input to simulations using the finite element method is the computational domain (mesh), mesh markers, initial conditions and boundary conditions.
+To be able to restart a simulation at any point, one has to have the capability to read in all of the aforementioned variables.
+The adios4dolfinx package implements all of these operations, using the Message Passing Interface (MPI) for communication across multiple processes and ADIOS2 for writing/reading data to/from file.
+In particular, the functionality of adios4dolfinx includes "N-to-M"-checkpointing, which means that one can store a result of a simulation that was generated with N number of processes,
+and read it into a program running on M processes.
 
 # Statement of need
From b540cdd1fb1ad56f35ba29a11b43d62d5b1977bd Mon Sep 17 00:00:00 2001
From: jorgensd
Date: Sun, 14 Apr 2024 11:24:20 +0200
Subject: [PATCH 41/49] Use italics

---
 joss-paper/paper.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/joss-paper/paper.md b/joss-paper/paper.md
index 3ee06bc..ca89b05 100644
--- a/joss-paper/paper.md
+++ b/joss-paper/paper.md
@@ -24,7 +24,7 @@ DOLFINx is a general framework for solving partial differential equations using
 The input to simulations using the finite element method is the computational domain (mesh), mesh markers, initial conditions and boundary conditions.
 To be able to restart a simulation at any point, one has to have the capability to read in all of the aforementioned variables.
 The adios4dolfinx package implements all of these operations, using the Message Passing Interface (MPI) for communication across multiple processes and ADIOS2 for writing/reading data to/from file.
-In particular, the functionality of adios4dolfinx includes "N-to-M"-checkpointing, which means that one can store a result of a simulation that was generated with N number of processes,
+In particular, the functionality of adios4dolfinx includes *N-to-M*-checkpointing, which means that one can store a result of a simulation that was generated with N number of processes,
 and read it into a program running on M processes.
 
 # Statement of need
From 71f84b3b72bc02383bb84dcccf997200208d4611 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Sun, 14 Apr 2024 11:43:01 +0200
Subject: [PATCH 42/49] Update summary of paper for a non-expert audience
 (#103)

* Update summary for a non-specialist audience

* Update date on manuscript
---
 joss-paper/paper.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/joss-paper/paper.md b/joss-paper/paper.md
index ca89b05..a898b38 100644
--- a/joss-paper/paper.md
+++ b/joss-paper/paper.md
@@ -12,7 +12,7 @@ authors:
 affiliations:
 - name: Simula Research Laboratory
   index: 1
-date: 6 March 2024
+date: 14 April 2024
 bibliography: paper.bib
 ---
From 1ad5a35dbd9df8b019dded1e6cd322f51692d6a4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Sun, 14 Apr 2024 12:04:12 +0200
Subject: [PATCH 43/49] Add statement of need (#101)

---
 README.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/README.md b/README.md
index 8e1eb29..1bd5a6c 100644
--- a/README.md
+++ b/README.md
@@ -11,6 +11,13 @@ See: [Checkpointing in DOLFINx - FEniCS 23](https://jsdokken.com/checkpointing-p
 
 For scalability, the code uses [MPI Neighbourhood collectives](https://www.mpi-forum.org/docs/mpi-3.1/mpi31-report/node200.htm) for communication across processes.
 
+## Statement of Need
+As the usage of high performance computing clusters increases, more and more large-scale, long-running simulations are deployed.
+The need for storing intermediate solutions from such simulations is crucial, as the HPC system might crash, or the simulation might crash or exceed the allotted computational budget.
+Having a checkpoint of related variables, such as the solutions to partial differential equations (PDEs), is therefore essential.
+The `adios4dolfinx` library extends the [DOLFINx](https://github.com/FEniCS/dolfinx/) computational framework for solving PDEs with checkpointing functionality, such that intermediate solutions and mesh information can be stored and re-used in another simulation.
+
+
 ## Installation
 Compatibility with DOLFINx:
 - ADIOS4DOLFINx v0.7.3 is compatible with DOLFINx v0.7.x
From db5271babace7ee65685cf00bdf4a817d1b678fb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Sun, 14 Apr 2024 12:21:43 +0200
Subject: [PATCH 44/49] Add list of examples to README.md (#100)

* Add list of examples to README.md

* Fix reference
---
 README.md | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/README.md b/README.md
index 1bd5a6c..633060b 100644
--- a/README.md
+++ b/README.md
@@ -85,6 +85,15 @@ python3 -m pip install adios4dolfinx[test]@git+https://github.com/jorgensd/adios4dolfinx@v0.7.3
 > [!IMPORTANT]
 > Only one mesh per file is allowed
 
+## Example Usage
+The repository contains many documented examples of usage in the `docs` folder, including:
+- [Reading and writing mesh checkpoints](./docs/writing_mesh_checkpoint.py)
+- [Storing mesh partitioning data](./docs/partitioned_mesh.py)
+- [Writing mesh-tags to a checkpoint](./docs/meshtags.py)
+- [Reading and writing function checkpoints](./docs/writing_functions_checkpoint.py)
+- [Checkpoint on input mesh](./docs/original_checkpoint.py)
+Further examples can be found at [ADIOS4DOLFINx examples](https://jsdokken.com/adios4dolfinx/).
+
 ### Backwards compatibility
 
 > [!WARNING]
From 9043b8272b1301b2e1fb932986c1f26e3d2866b0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Sun, 14 Apr 2024 12:46:02 +0200
Subject: [PATCH 45/49] Add test instructions (#102)

---
 README.md | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/README.md b/README.md
index 633060b..dc28d26 100644
--- a/README.md
+++ b/README.md
@@ -109,6 +109,39 @@ Only checkpoints for `Lagrange` or `DG` functions are supported from legacy DOLF
 
 See the [API](./docs/api) for more information.
 
+## Testing
+
+This library uses `pytest` for testing.
+To execute the tests, one should first install the library and its dependencies, as listed above.
+Then, one can execute all tests by calling
+```bash
+python3 -m pytest .
+```
+
+### Testing against data from legacy DOLFIN
+Some tests check the capability of reading data created with the legacy version of DOLFIN.
+To create this dataset, start a docker container with legacy DOLFIN, for instance:
+```bash
+docker run -ti -v $(pwd):/root/shared -w /root/shared --rm ghcr.io/scientificcomputing/fenics:2024-02-19
+```
+Then, inside this container, call
+```bash
+python3 ./tests/create_legacy_data.py --output-dir=legacy
+```
+
+### Testing against data from older versions of ADIOS4DOLFINx
+Some tests check the capability to read data generated by `adios4dolfinx<0.7.2`.
+To generate data for these tests, use the following commands:
+```bash
+docker run -ti -v $(pwd):/root/shared -w /root/shared --rm ghcr.io/fenics/dolfinx/dolfinx:v0.7.3
+```
+Then, inside the container, call
+```bash
+python3 -m pip install adios4dolfinx==0.7.1
+python3 ./tests/create_legacy_checkpoint.py --output-dir=legacy_checkpoint
+```
+
 ## Long term plan
 The long term plan is to get this library merged into DOLFINx (rewritten in C++ with appropriate Python-bindings).
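PATCH 39 above splits mesh reading into `read_mesh_data` plus mesh creation, with `read_mesh` reduced to a thin wrapper. The following sketch illustrates how a user could exploit that refactor; it mirrors the wrapper shown in the patch, the file name is illustrative, and the `dolfinx.mesh.create_mesh` call assumes the main-branch signature the patch itself uses.

```python
# Sketch of the read_mesh_data refactor from PATCH 39 above: the raw
# (topology, geometry, UFL domain, partitioner) tuple can be inspected
# before the mesh is distributed. "checkpoint.bp" is an illustrative path.
from mpi4py import MPI
import dolfinx
import adios4dolfinx

topology, geometry, domain, partitioner = adios4dolfinx.read_mesh_data(
    "checkpoint.bp", MPI.COMM_WORLD, engine="BP4"
)
# topology and geometry are NumPy arrays; inspect them before distribution.
print(f"global cells on this rank: {topology.shape}, nodes: {geometry.shape}")

# Equivalent to calling adios4dolfinx.read_mesh directly:
mesh = dolfinx.mesh.create_mesh(MPI.COMM_WORLD, topology, geometry, domain, partitioner)
```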
From 934fffb1d83ce91bf42ba012e739728501d57986 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Tue, 16 Apr 2024 11:23:00 +0100
Subject: [PATCH 46/49] API change from https://github.com/FEniCS/dolfinx/pull/3119
 (#104)

---
 src/adios4dolfinx/checkpointing.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py
index ba48d72..f1640bd 100644
--- a/src/adios4dolfinx/checkpointing.py
+++ b/src/adios4dolfinx/checkpointing.py
@@ -284,8 +284,8 @@
         adios_file.file.PerformGets()
         adios_file.file.EndStep()
 
-    local_entities, local_values = dolfinx.cpp.io.distribute_entity_data(
-        mesh._cpp_object, int(dim), mesh_entities, tag_values
+    local_entities, local_values = dolfinx.io.distribute_entity_data(
+        mesh, int(dim), mesh_entities, tag_values
     )
     mesh.topology.create_connectivity(dim, 0)
     mesh.topology.create_connectivity(dim, mesh.topology.dim)
From 3f3025b703d86f00ca02cf06ff0f079792cc5d3e Mon Sep 17 00:00:00 2001
From: Henrik Finsberg
Date: Sat, 20 Apr 2024 09:53:22 +0200
Subject: [PATCH 47/49] Fix bug in reading legacy mesh. Should check that file
 exists before opening it (#105)

---
 src/adios4dolfinx/legacy_readers.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/adios4dolfinx/legacy_readers.py b/src/adios4dolfinx/legacy_readers.py
index 6df3bfe..9497274 100644
--- a/src/adios4dolfinx/legacy_readers.py
+++ b/src/adios4dolfinx/legacy_readers.py
@@ -276,6 +276,11 @@
         group: Name of mesh in `h5`-file
         cell_type: What type of cell type, by default tetrahedron.
     """
+    # Make sure we use the HDF5File and check that the file is present
+    filename = pathlib.Path(filename).with_suffix(".h5")
+    if not filename.is_file():
+        raise FileNotFoundError(f"File {filename} does not exist")
+
     # Create ADIOS2 reader
     adios = adios2.ADIOS(comm)
     with ADIOSFile(
@@ -285,11 +290,6 @@
         io_name="Mesh reader",
         engine="HDF5",
     ) as adios_file:
-        # Make sure we use the HDF5File and check that the file is present
-        filename = pathlib.Path(filename).with_suffix(".h5")
-        if not filename.is_file():
-            raise FileNotFoundError(f"File {filename} does not exist")
-
         # Get mesh topology (distributed)
         if f"{group}/topology" not in adios_file.io.AvailableVariables().keys():
             raise KeyError(f"Mesh topology not found at '{group}/topology'")
From 6347e436ce157028fb3be17ee7cadbe05c30e286 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?=
Date: Tue, 23 Apr 2024 21:35:18 +0200
Subject: [PATCH 48/49] API updates (#107)

* API updates relating to: https://github.com/FEniCS/dolfinx/pull/3149

* Updates due to: https://github.com/FEniCS/dolfinx/pull/3119
---
 src/adios4dolfinx/checkpointing.py | 10 +++------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/src/adios4dolfinx/checkpointing.py b/src/adios4dolfinx/checkpointing.py
index f1640bd..6d2ec5e 100644
--- a/src/adios4dolfinx/checkpointing.py
+++ b/src/adios4dolfinx/checkpointing.py
@@ -285,7 +285,7 @@
         adios_file.file.EndStep()
 
         local_entities, local_values = dolfinx.io.distribute_entity_data(
-            mesh, int(dim), mesh_entities, tag_values
+            mesh, int(dim), mesh_entities.astype(np.int32), tag_values
         )
     mesh.topology.create_connectivity(dim, 0)
     mesh.topology.create_connectivity(dim, mesh.topology.dim)
@@ -396,12 +396,8 @@
     dofmap_sorted_by_input = recv_array[unrolled_dofmap_position]
 
     # First invert input data to
reference element then transform to current mesh - element.pre_apply_transpose_dof_transformation( - dofmap_sorted_by_input, input_perms_sorted, bs - ) - element.pre_apply_inverse_transpose_dof_transformation( - dofmap_sorted_by_input, inc_perms, bs - ) + element.Tt_apply(dofmap_sorted_by_input, input_perms_sorted, bs) + element.Tt_inv_apply(dofmap_sorted_by_input, inc_perms, bs) # Compute invert permutation inverted_perm = np.empty_like(unrolled_dofmap_position) inverted_perm[unrolled_dofmap_position] = np.arange( From b7eb4a47af5b34ec4f0e554694d842415d5250ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Wed, 24 Apr 2024 15:16:57 +0200 Subject: [PATCH 49/49] Increase MPICH runners to 4 processes (#108) * Increase MPICH runners to 4 processes * Try bumping num procs for openmpi as well * Try oversubscribing * Use environment variable --- .github/workflows/test_package.yml | 9 ++++----- .github/workflows/test_package_openmpi.yml | 5 +++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/test_package.yml b/.github/workflows/test_package.yml index b3e5ff7..a61943c 100644 --- a/.github/workflows/test_package.yml +++ b/.github/workflows/test_package.yml @@ -17,7 +17,6 @@ on: - cron: "0 8 * * *" jobs: - create-datasets: uses: ./.github/workflows/create_legacy_data.yml with: @@ -27,7 +26,7 @@ jobs: uses: ./.github/workflows/create_legacy_checkpoint.yml with: artifact_name: "legacy_checkpoint_mpich" - + check-formatting: uses: ./.github/workflows/check_formatting.yml @@ -49,13 +48,13 @@ jobs: with: name: legacy_mpich path: ./legacy - + - name: Download legacy data uses: actions/download-artifact@v4 with: name: legacy_checkpoint_mpich path: ./legacy_checkpoint - + - name: Install package run: python3 -m pip install .[test] @@ -65,7 +64,7 @@ jobs: - name: Run tests in parallel run: | - mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py -m pytest -xvs ./tests/ + mpirun -n 4 coverage run --rcfile=.coveragerc -m mpi4py -m pytest -xvs ./tests/ - name: Combine coverage reports run: | diff --git a/.github/workflows/test_package_openmpi.yml b/.github/workflows/test_package_openmpi.yml index 4d9c102..2d0c7a0 100644 --- a/.github/workflows/test_package_openmpi.yml +++ b/.github/workflows/test_package_openmpi.yml @@ -36,6 +36,7 @@ jobs: PETSC_ARCH: "linux-gnu-real64-32" OMPI_ALLOW_RUN_AS_ROOT: 1 OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 + PRTE_MCA_rmaps_default_mapping_policy: :oversubscribe strategy: matrix: @@ -51,7 +52,7 @@ jobs: with: adios2: ${{ matrix.adios2 }} petsc_arch: ${{ env.PETSC_ARCH }} - dolfinx: main + dolfinx: main - name: Download legacy data uses: actions/download-artifact@v4 @@ -74,4 +75,4 @@ jobs: - name: Run tests in parallel run: | - mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py -m pytest -xvs ./tests/ + mpirun -n 4 coverage run --rcfile=.coveragerc -m mpi4py -m pytest -xvs ./tests/
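For reference, the parallel test and coverage steps that PATCH 49 configures can be reproduced locally. The commands below are a sketch assembled from the workflow files above; the Open MPI environment variables are only needed when running as root (as in the CI containers), and the oversubscribe policy mirrors the `PRTE_MCA_rmaps_default_mapping_policy` variable the workflow sets.

```bash
# Reproduce the CI parallel test + coverage steps locally (sketch based on
# the workflow files above; run from the repository root after installing
# the package with: python3 -m pip install .[test]).
export OMPI_ALLOW_RUN_AS_ROOT=1          # only needed when running as root
export OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1
export PRTE_MCA_rmaps_default_mapping_policy=:oversubscribe  # allow 4 ranks on fewer cores
mpirun -n 4 coverage run --rcfile=.coveragerc -m mpi4py -m pytest -xvs ./tests/
coverage combine
coverage report -m
```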