From 6f8f639dfa1618a63b15106a2157636d20fb16c4 Mon Sep 17 00:00:00 2001
From: sifnoc
Date: Tue, 12 Dec 2023 02:33:14 +0900
Subject: [PATCH] feat: bench runner

---
 .gitignore                        |  1 +
 Cargo.toml                        |  4 ++
 benches/build_mst_with_workers.rs | 62 +++++++++++++++++++++++++++++++
 3 files changed, 67 insertions(+)
 create mode 100644 benches/build_mst_with_workers.rs

diff --git a/.gitignore b/.gitignore
index ea8c4bf..ee0a4c7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
 /target
+/benches/csv
diff --git a/Cargo.toml b/Cargo.toml
index 657c5ca..2f72340 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -26,6 +26,10 @@ serde_yaml = "0.9.27"
 name = "mini-tree-server"
 path = "bin/mini_tree_server.rs"
 
+[[bin]]
+name = "orchestrator-bench"
+path = "benches/build_mst_with_workers.rs"
+
 [features]
 docker = []
 docker-swarm = []
diff --git a/benches/build_mst_with_workers.rs b/benches/build_mst_with_workers.rs
new file mode 100644
index 0000000..98c5413
--- /dev/null
+++ b/benches/build_mst_with_workers.rs
@@ -0,0 +1,62 @@
+#![feature(generic_const_exprs)]
+use const_env::from_env;
+use std::{error::Error, fs};
+use summa_aggregation::{executor::CloudSpawner, orchestrator::Orchestrator};
+use tokio::time::Instant;
+
+#[from_env]
+const LEVELS: usize = 20;
+#[from_env]
+const CHUNK: usize = 32;
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn Error>> {
+    // We assume that custodians, when setting up their distributed environment, will obtain the URLs of worker nodes.
+    // In this example, we use a single worker URL corresponding to a worker spawned earlier.
+    // It is important to ensure that the number of URLs matches the number of executors.
+    let worker_node_urls = vec!["127.0.0.1:4000".to_string()];
+    let total_workers = worker_node_urls.len();
+
+    const N_CURRENCIES: usize = 1;
+    const N_BYTES: usize = 14;
+
+    // Read the directory and collect CSV file paths
+    let csv_directory = format!("benches/csv/level_{}/{}_chunks", LEVELS, CHUNK);
+    let csv_file_paths: Vec<String> = fs::read_dir(csv_directory)?
+        .filter_map(|entry| {
+            let entry = entry.ok()?;
+            let path = entry.path();
+            if path.is_file() && path.extension()? == "csv" {
+                Some(path.to_string_lossy().into_owned())
+            } else {
+                None
+            }
+        })
+        .collect();
+
+    println!(
+        "LEVELS: {}, N_CURRENCIES: {}, number_of_csv: {}, number_of_workers: {}",
+        LEVELS,
+        N_CURRENCIES,
+        csv_file_paths.len(),
+        total_workers
+    );
+
+    // The number of Executors must match the number of worker_node_urls.
+    let start = Instant::now();
+
+    let spawner = CloudSpawner::new(None, worker_node_urls.clone(), 4000);
+
+    let orchestrator =
+        Orchestrator::<N_CURRENCIES, N_BYTES>::new(Box::new(spawner), csv_file_paths);
+
+    let _aggregation_merkle_sum_tree = orchestrator
+        .create_aggregation_mst(total_workers)
+        .await
+        .unwrap();
+    println!(
+        "Time to create aggregation merkle sum tree: {:?}",
+        start.elapsed()
+    );
+    Ok(())
+}
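
Usage sketch (assumptions, not part of the patch itself: a worker is already reachable at 127.0.0.1:4000, for example the mini-tree-server binary declared above, and CSV chunk files have been generated under benches/csv/level_<LEVELS>/<CHUNK>_chunks). Because const_env's #[from_env] resolves LEVELS and CHUNK at compile time and generic_const_exprs needs a nightly toolchain, the bench would be invoked roughly as:

    LEVELS=20 CHUNK=32 cargo +nightly run --release --bin orchestrator-bench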