diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
index 311274f..e2f36eb 100644
--- a/.github/workflows/run-tests.yml
+++ b/.github/workflows/run-tests.yml
@@ -3,9 +3,9 @@ name: 🧪
 on:
   push:
     branches: ["**"]
-#  pull_request:
-#    branches: [main]
-#  merge_group:
+  pull_request:
+    branches: [main]
+  merge_group:
 
 jobs:
   run-tests-rust:
@@ -15,8 +15,7 @@ jobs:
       matrix:
         rust-version: ["stable"]
         mpi: ['openmpi']
-        # feature-flags: ['--features "strict"', '--features "serde,strict"']
-        feature-flags: ['--features "strict"']
+        feature-flags: ['--features "strict"', '--features "serde,strict"']
     steps:
       - name: Set up Rust
         uses: actions-rust-lang/setup-rust-toolchain@v1
@@ -33,12 +32,12 @@ jobs:
         run: sudo apt-get install -y libopenblas-dev liblapack-dev
 
 
-      #- name: Run unit tests
-      #  run: cargo test ${{ matrix.feature-flags }}
-      #- name: Run unit tests in release mode
-      #  run: cargo test --release ${{ matrix.feature-flags }}
-      #- name: Run tests
-      #  run: cargo test --examples --release ${{ matrix.feature-flags }}
+      - name: Run unit tests
+        run: cargo test ${{ matrix.feature-flags }}
+      - name: Run unit tests in release mode
+        run: cargo test --release ${{ matrix.feature-flags }}
+      - name: Run tests
+        run: cargo test --examples --release ${{ matrix.feature-flags }}
       - name: Run examples
         run: |
           python3 find_examples.py
diff --git a/examples/parallel_grid.rs b/examples/parallel_grid.rs
index 5a57371..789b537 100644
--- a/examples/parallel_grid.rs
+++ b/examples/parallel_grid.rs
@@ -19,7 +19,6 @@ fn main() {
     let universe: Universe = mpi::initialize().unwrap();
     let comm = universe.world();
     let rank = comm.rank();
-    println!("{rank} A");
     let grid = if rank == 0 {
         let mut i = 0;
         for y in 0..n {
@@ -38,13 +37,10 @@ fn main() {
             }
         }
 
-        println!("{rank} B");
         b.create_parallel_grid_root(&comm, GraphPartitioner::None)
     } else {
-        println!("{rank} B");
         b.create_parallel_grid(&comm, 0)
     };
 
-    println!("{rank} C");
 
     // Check that owned cells are sorted ahead of ghost cells
diff --git a/examples/test_partitioners.rs b/examples/test_partitioners.rs
index f1761dd..c3d4755 100644
--- a/examples/test_partitioners.rs
+++ b/examples/test_partitioners.rs
@@ -16,25 +16,25 @@ fn run_test<C: Communicator>(comm: &C, partitioner: GraphPartitioner) {
     let mut b =
         SingleElementGridBuilder::<f64>::new(2, (ReferenceCellType::Quadrilateral, 1));
 
-    let mut i = 0;
-    for y in 0..n {
-        for x in 0..n {
-            b.add_point(i, &[x as f64 / (n - 1) as f64, y as f64 / (n - 1) as f64]);
-            i += 1;
+    let rank = comm.rank();
+    let grid = if rank == 0 {
+        let mut i = 0;
+        for y in 0..n {
+            for x in 0..n {
+                b.add_point(i, &[x as f64 / (n - 1) as f64, y as f64 / (n - 1) as f64]);
+                i += 1;
+            }
         }
-    }
 
-    let mut i = 0;
-    for y in 0..n - 1 {
-        for x in 0..n - 1 {
-            let sw = n * y + x;
-            b.add_cell(i, &[sw, sw + 1, sw + n, sw + n + 1]);
-            i += 1;
+        let mut i = 0;
+        for y in 0..n - 1 {
+            for x in 0..n - 1 {
+                let sw = n * y + x;
+                b.add_cell(i, &[sw, sw + 1, sw + n, sw + n + 1]);
+                i += 1;
+            }
         }
-    }
 
-    let rank = comm.rank();
-    let grid = if rank == 0 {
         b.create_parallel_grid_root(comm, partitioner)
     } else {
         b.create_parallel_grid(comm, 0)
diff --git a/find_examples.py b/find_examples.py
index 316f539..5495244 100644
--- a/find_examples.py
+++ b/find_examples.py
@@ -34,8 +34,6 @@
             and file.endswith(".rs")
             and os.path.isfile(os.path.join(example_dir, file))
         ):
-            if file == "test_partitioners.rs":
-                continue
             files.append((os.path.join(example_dir, file), file[:-3]))
 
 # Check that two examples do not share a name
diff --git a/src/grid/builder.rs b/src/grid/builder.rs
index 2759958..aabf39f 100644
--- a/src/grid/builder.rs
+++ b/src/grid/builder.rs
@@ -68,16 +68,15 @@ where
             return ParallelGridImpl::new(comm, serial_grid, owners, global_indices);
         }
 
-        println!("ROOT 0");
         // Partition the cells via a KMeans algorithm. The midpoint of each cell is used and distributed
         // across processes via coupe. The returned array assigns each cell a process.
         let cell_owners = self.partition_cells(comm.size() as usize, partitioner);
-        println!("ROOT 1");
+
         // Each vertex is assigned the minimum process that has a cell that contains it.
        // Note that the array is of the size of the points in the grid and contains garbage information
         // for points that are not vertices.
         let vertex_owners = self.assign_vertex_owners(comm.size() as usize, &cell_owners);
-        println!("ROOT 2");
+
         // This distributes cells, vertices and points to processes.
         // Each process gets now only the cell that it owns via `cell_owners` but also its neighbours.
         // Then all the corresponding vertices and points are also added to the corresponding cell.
@@ -85,7 +84,7 @@ where
         // assigned to each process and the actual indices.
         let (vertices_per_proc, points_per_proc, cells_per_proc) =
             self.get_vertices_points_and_cells(comm.size() as usize, &cell_owners);
-        println!("ROOT 3");
+
         // Compute the chunks array for the coordinates associated with the points.
         // The idx array for the coords is simply `self.gdim()` times the idx array for the points.
         let coords_per_proc = ChunkedData {
@@ -104,7 +103,7 @@ where
                 .map(|x| self.gdim() * x)
                 .collect(),
         };
-        println!("ROOT 4");
+
         // This compputes for each process the vertex owners.
         let vertex_owners_per_proc = ChunkedData::<usize> {
             data: {
@@ -117,7 +116,7 @@ where
             },
             idx_bounds: vertices_per_proc.idx_bounds.clone(),
         };
-        println!("ROOT 5");
+
 
         // We now compute the cell information for each process.
         // First we need the cell type.
@@ -132,7 +131,7 @@ where
             },
             idx_bounds: cells_per_proc.idx_bounds.clone(),
         };
-        println!("ROOT 6");
+
         // Now we need the cell degrees.
         let cell_degrees_per_proc = ChunkedData {
             data: {
@@ -145,7 +144,7 @@ where
             },
             idx_bounds: cells_per_proc.idx_bounds.clone(),
         };
-        println!("ROOT 7");
+
         // Now need the cell owners.
         let cell_owners_per_proc = ChunkedData {
             data: {
@@ -158,9 +157,9 @@ where
             },
             idx_bounds: cells_per_proc.idx_bounds.clone(),
         };
+
         // Finally the cell points. These are more messy since each cell might have different numbers of points. Hence,
         // we need to compute a new counts array.
-        println!("ROOT 8");
         let cell_points_per_proc = {
             // First compute the total number of points needed so that we can pre-allocated the array.
             let total_points = cells_per_proc
@@ -193,7 +192,7 @@ where
             // Finally return the chunks
             ChunkedData { data, idx_bounds }
         };
-        println!("ROOT 9");
+
         // Now we send everything across the processes. Every _per_proc variable is scattered across
         // the processes. After the scatter operation we have on each process the following data.
         // - `point_indices` contains the indices of the points that are owned by the current process.
@@ -205,7 +204,6 @@ where
         // - `cell_types` contains the types of the cells that are owned by the current process.
         // - `cell_degrees` contains the degrees of the cells that are owned by the current process.
         // - `cell_owners` contains the owners of the cells that are owned by the current process.
-        println!("ROOT 10");
         let point_indices = scatterv_root(comm, &points_per_proc);
         let coordinates = scatterv_root(comm, &coords_per_proc);
         let vertex_indices = scatterv_root(comm, &vertices_per_proc);
@@ -216,9 +214,8 @@ where
         let cell_degrees = scatterv_root(comm, &cell_degrees_per_proc);
         let cell_owners = scatterv_root(comm, &cell_owners_per_proc);
 
-        println!("ROOT 11");
         // This is executed on all ranks and creates the local grid.
-        let r = self.create_parallel_grid_internal(
+        self.create_parallel_grid_internal(
             comm,
             point_indices,
             coordinates,
@@ -229,9 +226,7 @@ where
             cell_types,
             cell_degrees,
             cell_owners,
-        );
-        println!("ROOT 12");
-        r
+        )
     }
     fn create_parallel_grid<'a, C: Communicator>(
         &self,
diff --git a/src/io/gmsh.rs b/src/io/gmsh.rs
index ec5e44d..4fbe29e 100644
--- a/src/io/gmsh.rs
+++ b/src/io/gmsh.rs
@@ -100,7 +100,6 @@ impl<G: Grid<EntityDescriptor = ReferenceCellType>> GmshExport for G {
         let mut coords = vec![G::T::zero(); gdim];
         for node in self.entity_iter(0) {
             node.geometry().points().next().unwrap().coords(&mut coords);
-            println!("{coords:?}");
             for (n, c) in coords.iter().enumerate() {
                 if n != 0 {
                     gmsh_s.push(' ');