"""Micro-benchmark of MPI Allreduce latency for a few small buffer sizes.

Run under MPI (e.g. ``srun``/``mpirun``); every rank participates in the
Allreduce, rank 0 prints summary statistics and saves the raw timings.
"""

import time

import numpy as np
from mpi4py import MPI

comm_rank = MPI.COMM_WORLD.Get_rank()
comm_size = MPI.COMM_WORLD.Get_size()


if __name__ == "__main__":
    # abg of all reduce: time Allreduce over a sweep of buffer sizes.
    b_sizes = [256, 512, 1024]
    n_iterations = 100

    # Row = buffer-size index, column = iteration.
    timings = np.zeros((len(b_sizes), n_iterations))

    # Only rank 0 reports, otherwise the header is printed P times.
    if comm_rank == 0:
        print(f"Bench Allreduce for P={comm_size}")

    # BUGFIX: the original indexed `timings[b_i, i]` with the buffer SIZE
    # (256/512/1024) instead of its position, which raises IndexError on a
    # (3, 100) array. Use enumerate to get the row index.
    for b_idx, b_size in enumerate(b_sizes):
        b_init = np.random.rand(b_size)
        for i in range(n_iterations):
            # Fresh copy each iteration so every Allreduce reduces the
            # same input (IN_PLACE overwrites the buffer).
            b = b_init.copy()

            tic = time.perf_counter()
            MPI.COMM_WORLD.Allreduce(
                MPI.IN_PLACE,
                b,
                op=MPI.SUM,
            )
            toc = time.perf_counter()

            timings[b_idx, i] = toc - tic

        if comm_rank == 0:
            print(f"  b_size: {b_size}")
            print(f"  mean: {np.mean(timings[b_idx])}")
            print(f"  std: {np.std(timings[b_idx])}")

    # BUGFIX: every rank used to write the SAME file (name depends only on
    # comm_size), a last-writer-wins race. Rank 0 alone persists its timings.
    if comm_rank == 0:
        np.save(f"abg_allreduce_timings_{comm_size}.npy", timings)