Skip to content

Commit

Permalink
Make coupe optional (#84)
Browse files Browse the repository at this point in the history
* make coupe optional

* weekly tests

* add test for partitioners

* fmt

* add Manual partitioner
  • Loading branch information
mscroggs authored Jan 28, 2025
1 parent 24a488e commit 2c7bae8
Show file tree
Hide file tree
Showing 10 changed files with 238 additions and 89 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/run-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,8 @@ jobs:
uses: mpi4py/setup-mpi@v1
with:
mpi: ${{ matrix.mpi }}
# - name: Install cargo-mpirun
# run: cargo install cargo-mpirun
- name: Install cargo-mpirun
run: cargo install cargo-mpirun
- uses: actions/checkout@v4
- name: Install LAPACK, OpenBLAS
run:
Expand Down
13 changes: 7 additions & 6 deletions .github/workflows/run-weekly-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ jobs:
mpi: [ 'openmpi']
f_strict: ["", "strict,"]
f_serde: ["", "serde,"]
f_coupe: ["", "coupe,"]
steps:
- name: Set up Rust
uses: actions-rust-lang/setup-rust-toolchain@v1
Expand All @@ -31,24 +32,24 @@ jobs:
sudo apt-get install -y libopenblas-dev liblapack-dev

- name: Build rust library
run: cargo build --features "${{matrix.f_strict}}${{matrix.f_serde}}"
run: cargo build --features "${{matrix.f_strict}}${{matrix.f_serde}}${{matrix.f_coupe}}"
- name: Build rust library in release mode
run: cargo build --release --features "${{matrix.f_strict}}${{matrix.f_serde}}"
run: cargo build --release --features "${{matrix.f_strict}}${{matrix.f_serde}}${{matrix.f_coupe}}"

- name: Run unit tests
run: cargo test --features "${{matrix.f_strict}}${{matrix.f_serde}}"
run: cargo test --features "${{matrix.f_strict}}${{matrix.f_serde}}${{matrix.f_coupe}}"
- name: Run unit tests in release mode
run: cargo test --release --features "${{matrix.f_strict}}${{matrix.f_serde}}"
run: cargo test --release --features "${{matrix.f_strict}}${{matrix.f_serde}}${{matrix.f_coupe}}"
- name: Run tests
run: cargo test --examples --release --features "${{matrix.f_strict}}${{matrix.f_serde}}"
run: cargo test --examples --release --features "${{matrix.f_strict}}${{matrix.f_serde}}${{matrix.f_coupe}}"
- name: Run examples
run: |
python3 find_examples.py
chmod +x examples.sh
./examples.sh
- name: Build docs
run: cargo doc --features "${{matrix.f_strict}}${{matrix.f_serde}}" --no-deps
run: cargo doc --features "${{matrix.f_strict}}${{matrix.f_serde}}${{matrix.f_coupe}}" --no-deps

# run-tests-python:
# name: Run Python tests
Expand Down
3 changes: 2 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ serde = ["dep:serde", "ndelement/serde", "dep:ron"]
strict = []
sleef = ["rlst/sleef"]
default = ["sleef", "serde"]
coupe = ["dep:coupe"]

[package]
name = "ndgrid"
Expand All @@ -26,7 +27,7 @@ name = "ndgrid"
crate-type = ["lib", "cdylib"]

[dependencies]
coupe = { git = "https://github.com/LIHPC-Computational-Geometry/coupe.git" }
coupe = { git = "https://github.com/LIHPC-Computational-Geometry/coupe.git", optional = true }
itertools = "0.14.*"
mpi = { version = "0.8.*" }
ndelement = { git = "https://github.com/bempp/ndelement", default-features = false, features = ["mpi"] }
Expand Down
42 changes: 22 additions & 20 deletions examples/parallel_grid.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
//? mpirun -n {{NPROCESSES}} --features "serde"

use mpi::{
collective::SystemOperation, environment::Universe, topology::Communicator,
traits::CommunicatorCollectives,
Expand All @@ -6,36 +8,36 @@ use ndelement::types::ReferenceCellType;
use ndgrid::{
grid::local_grid::SingleElementGridBuilder,
traits::{Builder, Entity, Grid, ParallelBuilder, ParallelGrid},
types::Ownership,
types::{GraphPartitioner, Ownership},
};

fn main() {
let n = 100;
let n = 10;

let mut b = SingleElementGridBuilder::<f64>::new(2, (ReferenceCellType::Quadrilateral, 1));

let mut i = 0;
for y in 0..n {
for x in 0..n {
b.add_point(i, &[x as f64 / (n - 1) as f64, y as f64 / (n - 1) as f64]);
i += 1;
}
}

let mut i = 0;
for y in 0..n - 1 {
for x in 0..n - 1 {
let sw = n * y + x;
b.add_cell(i, &[sw, sw + 1, sw + n, sw + n + 1]);
i += 1;
}
}

let universe: Universe = mpi::initialize().unwrap();
let comm = universe.world();
let rank = comm.rank();
let grid = if rank == 0 {
b.create_parallel_grid_root(&comm)
let mut i = 0;
for y in 0..n {
for x in 0..n {
b.add_point(i, &[x as f64 / (n - 1) as f64, y as f64 / (n - 1) as f64]);
i += 1;
}
}

let mut i = 0;
for y in 0..n - 1 {
for x in 0..n - 1 {
let sw = n * y + x;
b.add_cell(i, &[sw, sw + 1, sw + n, sw + n + 1]);
i += 1;
}
}

b.create_parallel_grid_root(&comm, GraphPartitioner::None)
} else {
b.create_parallel_grid(&comm, 0)
};
Expand Down
5 changes: 4 additions & 1 deletion examples/test_parallel_io.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
//? mpirun -n {{NPROCESSES}} --features "serde"

#[cfg(feature = "serde")]
use mpi::{collective::CommunicatorCollectives, environment::Universe, traits::Communicator};
#[cfg(feature = "serde")]
Expand All @@ -6,6 +8,7 @@ use ndelement::{ciarlet::CiarletElement, types::ReferenceCellType};
use ndgrid::{
grid::ParallelGridImpl,
traits::{Builder, Grid, ParallelBuilder, RONExportParallel, RONImportParallel},
types::GraphPartitioner,
SingleElementGrid, SingleElementGridBuilder,
};

Expand Down Expand Up @@ -41,7 +44,7 @@ fn example_single_element_grid<C: Communicator>(

if rank == 0 {
create_single_element_grid_data(&mut b, n);
b.create_parallel_grid_root(comm)
b.create_parallel_grid_root(comm, GraphPartitioner::None)
} else {
b.create_parallel_grid(comm, 0)
}
Expand Down
119 changes: 119 additions & 0 deletions examples/test_partitioners.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,119 @@
//? mpirun -n {{NPROCESSES}} --features "coupe"

use mpi::{
collective::SystemOperation, environment::Universe, topology::Communicator,
traits::CommunicatorCollectives,
};
use ndelement::types::ReferenceCellType;
use ndgrid::{
grid::local_grid::SingleElementGridBuilder,
traits::{Builder, Entity, Grid, ParallelBuilder, ParallelGrid},
types::{GraphPartitioner, Ownership},
};

/// Build a structured quadrilateral grid of the unit square on rank 0,
/// distribute it using `partitioner`, and verify basic invariants of the
/// resulting parallel grid: owned cells precede ghost cells, owned global
/// cell indices are consecutive, and global entity counts are correct.
fn run_test<C: Communicator>(comm: &C, partitioner: GraphPartitioner) {
    // Number of points along each side; cells form an (n-1) x (n-1) grid.
    let n = 10;

    let mut b = SingleElementGridBuilder::<f64>::new(2, (ReferenceCellType::Quadrilateral, 1));

    let rank = comm.rank();
    let grid = if rank == 0 {
        // Rank 0 builds the whole grid, then scatters it to all ranks.
        let mut i = 0;
        for y in 0..n {
            for x in 0..n {
                b.add_point(i, &[x as f64 / (n - 1) as f64, y as f64 / (n - 1) as f64]);
                i += 1;
            }
        }

        let mut i = 0;
        for y in 0..n - 1 {
            for x in 0..n - 1 {
                // `sw` is the point index of this cell's south-west corner.
                let sw = n * y + x;
                b.add_cell(i, &[sw, sw + 1, sw + n, sw + n + 1]);
                i += 1;
            }
        }

        b.create_parallel_grid_root(comm, partitioner)
    } else {
        // Other ranks receive their portion of the grid from rank 0.
        b.create_parallel_grid(comm, 0)
    };

    // Check that owned cells are sorted ahead of ghost cells.

    let cell_count_owned = grid
        .local_grid()
        .cell_iter()
        .filter(|entity| entity.is_owned())
        .count();

    // The first `cell_count_owned` cells must then all be owned.
    for cell in grid.local_grid().cell_iter().take(cell_count_owned) {
        assert!(cell.is_owned())
    }

    // Global indices of owned cells must be consecutive, starting at this
    // rank's offset in the cell layout.

    let mut cell_global_count = grid.cell_layout().local_range().0;

    for cell in grid.local_grid().cell_iter().take(cell_count_owned) {
        assert_eq!(cell.global_index(), cell_global_count);
        cell_global_count += 1;
    }

    // Count owned vertices (dim 0) and cells (dim 2) on this rank. Only the
    // counts are needed, so count directly instead of collecting the global
    // indices into a Vec and taking its length (clippy: needless_collect).

    let nvertices = grid
        .local_grid()
        .entity_iter(0)
        .filter(|e| matches!(e.ownership(), Ownership::Owned))
        .count();

    let ncells = grid
        .local_grid()
        .entity_iter(2)
        .filter(|e| matches!(e.ownership(), Ownership::Owned))
        .count();

    // Every entity is owned by exactly one rank, so summing the per-rank
    // counts must reproduce the sizes of the grid built on rank 0.
    let mut total_cells: usize = 0;
    let mut total_vertices: usize = 0;

    comm.all_reduce_into(&ncells, &mut total_cells, SystemOperation::sum());
    comm.all_reduce_into(&nvertices, &mut total_vertices, SystemOperation::sum());

    assert_eq!(total_cells, (n - 1) * (n - 1));
    assert_eq!(total_vertices, n * n);
}

fn main() {
    let universe: Universe = mpi::initialize().unwrap();
    let comm = universe.world();

    // Grid size used inside `run_test`: an n x n grid of points yields
    // (n - 1) * (n - 1) cells. NOTE: must stay in sync with `n` in
    // `run_test`; previously this was the magic constant 81.
    let n: usize = 10;
    let ncells = (n - 1) * (n - 1);

    if comm.rank() == 0 {
        println!("Testing GraphPartitioner::None");
    }
    run_test(&comm, GraphPartitioner::None);

    // Manual partitioning: assign cell i to process i mod size (round-robin).
    let mut p = vec![];
    for i in 0..ncells {
        p.push(i % comm.size() as usize);
    }
    if comm.rank() == 0 {
        println!("Testing GraphPartitioner::Manual");
    }
    run_test(&comm, GraphPartitioner::Manual(p));

    // The coupe-based partitioner only exists with the "coupe" feature.
    // Gate the announcement and the test together so they cannot drift apart.
    #[cfg(feature = "coupe")]
    {
        if comm.rank() == 0 {
            println!("Testing GraphPartitioner::Coupe");
        }
        run_test(&comm, GraphPartitioner::Coupe);
    }
}
Loading

0 comments on commit 2c7bae8

Please sign in to comment.