diff --git a/.github/workflows/build-wheels.yml b/.github/workflows/build-wheels.yml index 89edb90..0f402df 100644 --- a/.github/workflows/build-wheels.yml +++ b/.github/workflows/build-wheels.yml @@ -31,7 +31,7 @@ jobs: F77: gfortran CC: mpicc CXX: mpicxx - CIBW_BUILD: cp37-* cp38-* cp39-* + CIBW_BUILD: cp39-* CIBW_BEFORE_BUILD: > brew install cmake pipx hdf5-mpi with: @@ -65,7 +65,7 @@ jobs: apt update -y && apt upgrade && apt install -y build-essential cmake mpich libhdf5-mpich-dev - CIBW_BUILD: cp37-* cp38-* cp39-* + CIBW_BUILD: cp39-* CIBW_ARCHS_LINUX: "auto64" CIBW_MANYLINUX_X86_64_IMAGE: manylinux_2_24 CIBW_MANYLINUX_I686_IMAGE: manylinux_2_24 diff --git a/setup.py b/setup.py index ba914a4..090b162 100644 --- a/setup.py +++ b/setup.py @@ -35,8 +35,12 @@ def __init__(self, name, target="all", cmake_lists_dir=".", **kwa): class cmake_build_ext(build_ext.build_ext): def build_extensions(self): # Ensure that CMake is present and working + cmake_path = "cmake" + if os.environ.get("CMAKE_PATH", False): + cmake_path = os.environ.get("CMAKE_PATH", False) + try: - out = subprocess.check_output(["cmake", "--version"]) + out = subprocess.check_output([cmake_path, "--version"]) except OSError: raise RuntimeError("Cannot find CMake executable") @@ -137,7 +141,7 @@ def build_extensions(self): name="NeuroH5", package_dir={"": "python"}, packages=["neuroh5"], - version="0.1.12", + version="0.1.13", maintainer="Ivan Raikov", maintainer_email="ivan.g.raikov@gmail.com", description="A parallel HDF5-based library for storage and processing of large-scale graphs and neural cell model attributes.", @@ -145,14 +149,14 @@ def build_extensions(self): long_description_content_type='text/markdown', url="http://github.com/iraikov/neuroh5", include_package_data=True, - entry_points={ - "console_scripts": [ - 'initrange=neuroh5.initrange:cli', - 'initprj=neuroh5.initprj:cli', - 'importdbs=neuroh5.importdbs:cli', - 'importcoords=neuroh5.importcoords:cli', - ] - }, +# entry_points={ +# 
"console_scripts": [ +# 'initrange=neuroh5.initrange:cli', +# 'initprj=neuroh5.initprj:cli', +# 'importdbs=neuroh5.importdbs:cli', +# 'importcoords=neuroh5.importcoords:cli', +# ] +# }, cmdclass={"build_ext": cmake_build_ext}, ext_modules=[CMakeExtension("neuroh5.io", target="python_neuroh5_io")], ) diff --git a/src/graph/append_graph.cc b/src/graph/append_graph.cc index 7e4c843..05df46f 100644 --- a/src/graph/append_graph.cc +++ b/src/graph/append_graph.cc @@ -5,7 +5,7 @@ /// Top-level functions for appending edge information to graphs in /// DBS (Destination Block Sparse) format. /// -/// Copyright (C) 2016-2021 Project NeuroH5. +/// Copyright (C) 2016-2024 Project NeuroH5. //============================================================================== @@ -144,6 +144,12 @@ namespace neuroh5 } throw_assert_nomsg(node_index.size() == total_num_nodes); + + if (total_num_nodes == 0) + { + throw_assert_nomsg(MPI_Barrier(all_comm) == MPI_SUCCESS); + return 0; + } set io_rank_set; data::range_sample(size, io_size, io_rank_set); diff --git a/src/graph/write_graph.cc b/src/graph/write_graph.cc index dc43a20..36d3d46 100644 --- a/src/graph/write_graph.cc +++ b/src/graph/write_graph.cc @@ -5,7 +5,7 @@ /// Top-level functions for writing graphs in DBS (Destination Block Sparse) /// format. /// -/// Copyright (C) 2016-2021 Project NeuroH5. +/// Copyright (C) 2016-2024 Project NeuroH5. 
//============================================================================== @@ -71,6 +71,8 @@ namespace neuroh5 size_t dst_start, dst_end; size_t src_start, src_end; + auto compare_nodes = [](const NODE_IDX_T& a, const NODE_IDX_T& b) { return (a < b); }; + int size, rank; throw_assert_nomsg(MPI_Comm_size(all_comm, &size) == MPI_SUCCESS); throw_assert_nomsg(MPI_Comm_rank(all_comm, &rank) == MPI_SUCCESS); @@ -106,6 +108,53 @@ namespace neuroh5 dst_end = dst_start + pop_ranges[dst_pop_idx].count; src_start = pop_ranges[src_pop_idx].start; src_end = src_start + pop_ranges[src_pop_idx].count; + + total_num_nodes = 0; + vector< NODE_IDX_T > node_index; + + { // Determine the destination node indices present in the input + // edge map across all ranks + size_t num_nodes = input_edge_map.size(); + vector<size_t> sendbuf_num_nodes(size, num_nodes); + vector<size_t> recvbuf_num_nodes(size); + vector<int> recvcounts(size, 0); + vector<int> displs(size+1, 0); + throw_assert_nomsg(MPI_Allgather(&sendbuf_num_nodes[0], 1, MPI_SIZE_T, + &recvbuf_num_nodes[0], 1, MPI_SIZE_T, all_comm) + == MPI_SUCCESS); + throw_assert_nomsg(MPI_Barrier(all_comm) == MPI_SUCCESS); + for (size_t p=0; p<size; p++) + { + total_num_nodes = total_num_nodes + recvbuf_num_nodes[p]; + displs[p+1] = displs[p] + recvbuf_num_nodes[p]; + recvcounts[p] = recvbuf_num_nodes[p]; + } + + vector< NODE_IDX_T > local_node_index; + for (auto iter: input_edge_map) + { + NODE_IDX_T dst = iter.first; + local_node_index.push_back(dst); + } + + node_index.resize(total_num_nodes,0); + throw_assert_nomsg(MPI_Allgatherv(&local_node_index[0], num_nodes, MPI_NODE_IDX_T, + &node_index[0], &recvcounts[0], &displs[0], MPI_NODE_IDX_T, + all_comm) == MPI_SUCCESS); + throw_assert_nomsg(MPI_Barrier(all_comm) == MPI_SUCCESS); + + vector<size_t> p = sort_permutation(node_index, compare_nodes); + apply_permutation_in_place(node_index, p); + } + + throw_assert_nomsg(node_index.size() == total_num_nodes); + + if (total_num_nodes == 0) + { + throw_assert_nomsg(MPI_Barrier(all_comm) == MPI_SUCCESS); + return 0; + } + // Create an I/O communicator MPI_Comm io_comm; @@ -126,7 +175,6 @@ namespace neuroh5 compute_node_rank_map(io_size, total_num_nodes, 
node_rank_map); // construct a map where each set of edges are arranged by destination I/O rank - auto compare_nodes = [](const NODE_IDX_T& a, const NODE_IDX_T& b) { return (a < b); }; rank_edge_map_t rank_edge_map; for (auto iter : input_edge_map) {