test with smaller grid
mscroggs committed Jan 28, 2025
1 parent 7ed91cf commit bf8e17c
Showing 6 changed files with 36 additions and 49 deletions.
21 changes: 10 additions & 11 deletions .github/workflows/run-tests.yml
@@ -3,9 +3,9 @@ name: 🧪
on:
push:
branches: ["**"]
# pull_request:
# branches: [main]
# merge_group:
pull_request:
branches: [main]
merge_group:

jobs:
run-tests-rust:
@@ -15,8 +15,7 @@ jobs:
matrix:
rust-version: ["stable"]
mpi: ['openmpi']
# feature-flags: ['--features "strict"', '--features "serde,strict"']
feature-flags: ['--features "strict"']
feature-flags: ['--features "strict"', '--features "serde,strict"']
steps:
- name: Set up Rust
uses: actions-rust-lang/setup-rust-toolchain@v1
@@ -33,12 +32,12 @@ jobs:
run:
sudo apt-get install -y libopenblas-dev liblapack-dev

#- name: Run unit tests
# run: cargo test ${{ matrix.feature-flags }}
#- name: Run unit tests in release mode
# run: cargo test --release ${{ matrix.feature-flags }}
#- name: Run tests
# run: cargo test --examples --release ${{ matrix.feature-flags }}
- name: Run unit tests
run: cargo test ${{ matrix.feature-flags }}
- name: Run unit tests in release mode
run: cargo test --release ${{ matrix.feature-flags }}
- name: Run tests
run: cargo test --examples --release ${{ matrix.feature-flags }}
- name: Run examples
run: |
python3 find_examples.py
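
With the commented-out steps restored, the matrix again runs the unit tests, release-mode tests and example tests for both the --features "strict" and --features "serde,strict" combinations. A "strict" feature in Rust crates is usually nothing more than a switch that promotes compiler warnings to hard errors during CI; a minimal sketch of that convention (an assumption about how this crate wires it up, not something shown in this diff):

    // Crate root (e.g. lib.rs or main.rs). With `cargo test --features "strict"`,
    // every compiler warning is promoted to an error, so the CI jobs above fail
    // on warnings; without the feature the same code merely warns.
    #![cfg_attr(feature = "strict", deny(warnings))]

    fn main() {
        let unused = 0; // under `--features "strict"` this unused variable breaks the build
        println!("built without strict, or no warnings present");
    }
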
4 changes: 0 additions & 4 deletions examples/parallel_grid.rs
@@ -19,7 +19,6 @@ fn main() {
let universe: Universe = mpi::initialize().unwrap();
let comm = universe.world();
let rank = comm.rank();
println!("{rank} A");
let grid = if rank == 0 {
let mut i = 0;
for y in 0..n {
@@ -38,13 +37,10 @@
}
}

println!("{rank} B");
b.create_parallel_grid_root(&comm, GraphPartitioner::None)
} else {
println!("{rank} B");
b.create_parallel_grid(&comm, 0)
};
println!("{rank} C");

// Check that owned cells are sorted ahead of ghost cells

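
With the debug printlns removed, the example is back to the standard collective pattern for building a distributed grid: rank 0 describes the whole mesh and calls create_parallel_grid_root, every other rank calls create_parallel_grid naming rank 0 as the root, and each rank receives its local piece. A condensed sketch of that control flow, reusing only the builder calls visible in this diff (the grid crate's use paths are not shown in the commit and are left as a placeholder):

    // Condensed from examples/parallel_grid.rs as shown in this diff.
    use mpi::traits::Communicator;
    // use <grid crate>::{SingleElementGridBuilder, GraphPartitioner, ReferenceCellType};
    //   -- exact module paths are not visible in this commit.

    fn main() {
        let universe = mpi::initialize().unwrap();
        let comm = universe.world();
        let rank = comm.rank();

        let mut b = SingleElementGridBuilder::<f64>::new(2, (ReferenceCellType::Quadrilateral, 1));

        // Every rank must reach exactly one of the two collective calls below.
        let _grid = if rank == 0 {
            // Root only: add all points and cells to the builder (the nested
            // loops shown in the diff above), then partition and scatter.
            b.create_parallel_grid_root(&comm, GraphPartitioner::None)
        } else {
            // All other ranks: receive their local part of the grid from rank 0.
            b.create_parallel_grid(&comm, 0)
        };
    }
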
30 changes: 15 additions & 15 deletions examples/test_partitioners.rs
@@ -16,25 +16,25 @@ fn run_test<C: Communicator>(comm: &C, partitioner: GraphPartitioner) {

let mut b = SingleElementGridBuilder::<f64>::new(2, (ReferenceCellType::Quadrilateral, 1));

let mut i = 0;
for y in 0..n {
for x in 0..n {
b.add_point(i, &[x as f64 / (n - 1) as f64, y as f64 / (n - 1) as f64]);
i += 1;
let rank = comm.rank();
let grid = if rank == 0 {
let mut i = 0;
for y in 0..n {
for x in 0..n {
b.add_point(i, &[x as f64 / (n - 1) as f64, y as f64 / (n - 1) as f64]);
i += 1;
}
}
}

let mut i = 0;
for y in 0..n - 1 {
for x in 0..n - 1 {
let sw = n * y + x;
b.add_cell(i, &[sw, sw + 1, sw + n, sw + n + 1]);
i += 1;
let mut i = 0;
for y in 0..n - 1 {
for x in 0..n - 1 {
let sw = n * y + x;
b.add_cell(i, &[sw, sw + 1, sw + n, sw + n + 1]);
i += 1;
}
}
}

let rank = comm.rank();
let grid = if rank == 0 {
b.create_parallel_grid_root(comm, partitioner)
} else {
b.create_parallel_grid(comm, 0)
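
The point and cell loops that moved inside the rank == 0 branch build a structured quadrilateral grid: points are numbered row by row, so the point at (x, y) has index n * y + x, and the cell whose south-west corner sits at (x, y) uses the vertices [sw, sw + 1, sw + n, sw + n + 1]. A self-contained illustration of that index arithmetic (plain Rust, independent of the grid crate):

    // Connectivity of an n x n grid of points, numbered row by row.
    fn quad_cells(n: usize) -> Vec<[usize; 4]> {
        let mut cells = Vec::with_capacity((n - 1) * (n - 1));
        for y in 0..n - 1 {
            for x in 0..n - 1 {
                let sw = n * y + x; // south-west corner of the cell
                cells.push([sw, sw + 1, sw + n, sw + n + 1]);
            }
        }
        cells
    }

    fn main() {
        // For n = 3 (a 3 x 3 point grid) there are four cells:
        // [0, 1, 3, 4], [1, 2, 4, 5], [3, 4, 6, 7], [4, 5, 7, 8].
        let cells = quad_cells(3);
        assert_eq!(cells.len(), 4);
        assert_eq!(cells[0], [0, 1, 3, 4]);
        assert_eq!(cells[3], [4, 5, 7, 8]);
    }
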
2 changes: 0 additions & 2 deletions find_examples.py
@@ -34,8 +34,6 @@
and file.endswith(".rs")
and os.path.isfile(os.path.join(example_dir, file))
):
if file == "test_partitioners.rs":
continue
files.append((os.path.join(example_dir, file), file[:-3]))

# Check that two examples do not share a name
27 changes: 11 additions & 16 deletions src/grid/builder.rs
@@ -68,24 +68,23 @@ where

return ParallelGridImpl::new(comm, serial_grid, owners, global_indices);
}
println!("ROOT 0");
// Partition the cells via a KMeans algorithm: the midpoint of each cell is passed to coupe, which
// distributes the cells across processes. The returned array assigns each cell to a process.
let cell_owners = self.partition_cells(comm.size() as usize, partitioner);
println!("ROOT 1");

// Each vertex is assigned the minimum process that has a cell that contains it.
// Note that the array has one entry per point in the grid and contains garbage information
// for points that are not vertices.
let vertex_owners = self.assign_vertex_owners(comm.size() as usize, &cell_owners);
println!("ROOT 2");

// This distributes cells, vertices and points to processes.
// Each process now gets not only the cells that it owns via `cell_owners` but also their neighbours.
// All of the corresponding vertices and points are then added for each of these cells.
// Each return value is a tuple consisting of a counts array that specifies how many indices are
// assigned to each process and the actual indices.
let (vertices_per_proc, points_per_proc, cells_per_proc) =
self.get_vertices_points_and_cells(comm.size() as usize, &cell_owners);
println!("ROOT 3");

// Compute the chunks array for the coordinates associated with the points.
// The idx array for the coords is simply `self.gdim()` times the idx array for the points.
let coords_per_proc = ChunkedData {
@@ -104,7 +103,7 @@ where
.map(|x| self.gdim() * x)
.collect(),
};
println!("ROOT 4");

// This computes the vertex owners for each process.
let vertex_owners_per_proc = ChunkedData::<usize> {
data: {
@@ -117,7 +116,7 @@ where
},
idx_bounds: vertices_per_proc.idx_bounds.clone(),
};
println!("ROOT 5");

// We now compute the cell information for each process.

// First we need the cell type.
@@ -132,7 +131,7 @@ where
},
idx_bounds: cells_per_proc.idx_bounds.clone(),
};
println!("ROOT 6");

// Now we need the cell degrees.
let cell_degrees_per_proc = ChunkedData {
data: {
@@ -145,7 +144,7 @@ where
},
idx_bounds: cells_per_proc.idx_bounds.clone(),
};
println!("ROOT 7");

// Now we need the cell owners.
let cell_owners_per_proc = ChunkedData {
data: {
@@ -158,9 +157,9 @@ where
},
idx_bounds: cells_per_proc.idx_bounds.clone(),
};

// Finally, the cell points. These are messier since each cell might have a different number of points. Hence,
// we need to compute a new counts array.
println!("ROOT 8");
let cell_points_per_proc = {
// First compute the total number of points needed so that we can pre-allocate the array.
let total_points = cells_per_proc
@@ -193,7 +192,7 @@ where
// Finally return the chunks
ChunkedData { data, idx_bounds }
};
println!("ROOT 9");

// Now we send everything across the processes. Every _per_proc variable is scattered across
// the processes. After the scatter operation we have on each process the following data.
// - `point_indices` contains the indices of the points that are owned by the current process.
@@ -205,7 +204,6 @@ where
// - `cell_types` contains the types of the cells that are owned by the current process.
// - `cell_degrees` contains the degrees of the cells that are owned by the current process.
// - `cell_owners` contains the owners of the cells that are owned by the current process.
println!("ROOT 10");
let point_indices = scatterv_root(comm, &points_per_proc);
let coordinates = scatterv_root(comm, &coords_per_proc);
let vertex_indices = scatterv_root(comm, &vertices_per_proc);
@@ -216,9 +214,8 @@ where
let cell_degrees = scatterv_root(comm, &cell_degrees_per_proc);
let cell_owners = scatterv_root(comm, &cell_owners_per_proc);

println!("ROOT 11");
// This is executed on all ranks and creates the local grid.
let r = self.create_parallel_grid_internal(
self.create_parallel_grid_internal(
comm,
point_indices,
coordinates,
Expand All @@ -229,9 +226,7 @@ where
cell_types,
cell_degrees,
cell_owners,
);
println!("ROOT 12");
r
)
}
fn create_parallel_grid<'a, C: Communicator>(
&self,
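
Stripped of the `ROOT n` debug printlns, the root-side pipeline in create_parallel_grid_root reads more clearly: partition the cells, give each vertex to the lowest-ranked process that owns a cell containing it, pack every per-process array into a flat buffer plus an idx_bounds array, and scatter the chunks. A self-contained sketch of those two data-handling ideas (illustrative re-implementations only, not the crate's actual partition_cells, assign_vertex_owners or ChunkedData):

    // Flat per-rank layout: `data` holds every rank's items back to back and
    // `idx_bounds[r]..idx_bounds[r + 1]` is the slice destined for rank `r`,
    // which is the shape scatterv-style MPI calls expect.
    struct Chunked<T> {
        data: Vec<T>,
        idx_bounds: Vec<usize>,
    }

    // Each vertex is owned by the minimum rank among the owners of the cells
    // containing it; vertices never touched keep a placeholder value, matching
    // the "garbage information for points that are not vertices" note above.
    fn assign_vertex_owners(nvertices: usize, cells: &[Vec<usize>], cell_owners: &[usize]) -> Vec<usize> {
        let mut owners = vec![usize::MAX; nvertices];
        for (cell, &owner) in cells.iter().zip(cell_owners) {
            for &v in cell {
                owners[v] = owners[v].min(owner);
            }
        }
        owners
    }

    fn main() {
        // Two quads sharing an edge; cell 0 is assigned to rank 1, cell 1 to rank 0.
        let cells = vec![vec![0, 1, 3, 4], vec![1, 2, 4, 5]];
        let cell_owners = vec![1, 0];
        // The shared vertices 1 and 4 end up on rank 0, the minimum of {0, 1}.
        assert_eq!(assign_vertex_owners(6, &cells, &cell_owners), vec![1, 0, 0, 1, 0, 0]);

        // Chunked point indices for two ranks: rank 0 gets points {0, 1}, rank 1 gets {2}.
        let points = Chunked { data: vec![0_usize, 1, 2], idx_bounds: vec![0, 2, 3] };
        assert_eq!(&points.data[points.idx_bounds[1]..points.idx_bounds[2]], &[2]);

        // As in the builder: each point carries gdim coordinates, so the coordinate
        // chunk bounds are simply gdim times the point chunk bounds.
        let gdim: usize = 2;
        let coord_bounds: Vec<usize> = points.idx_bounds.iter().map(|x| gdim * x).collect();
        assert_eq!(coord_bounds, vec![0, 4, 6]);
    }
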
1 change: 0 additions & 1 deletion src/io/gmsh.rs
@@ -100,7 +100,6 @@ impl<G: Grid<EntityDescriptor = ReferenceCellType>> GmshExport for G {
let mut coords = vec![G::T::zero(); gdim];
for node in self.entity_iter(0) {
node.geometry().points().next().unwrap().coords(&mut coords);
println!("{coords:?}");
for (n, c) in coords.iter().enumerate() {
if n != 0 {
gmsh_s.push(' ');
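
The removed println!("{coords:?}") was debug output inside the Gmsh export loop, which writes each node's coordinates space-separated into the output string. The same joining logic in isolation (a standalone sketch, not the crate's code):

    // Format one node's coordinates the way the export loop above does:
    // components separated by single spaces.
    fn coords_line(coords: &[f64]) -> String {
        coords
            .iter()
            .map(|c| c.to_string())
            .collect::<Vec<_>>()
            .join(" ")
    }

    fn main() {
        assert_eq!(coords_line(&[0.5, 1.0, 0.0]), "0.5 1 0");
    }
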
