diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
new file mode 100644
index 0000000..f7ffac5
--- /dev/null
+++ b/.github/workflows/rust.yml
@@ -0,0 +1,23 @@
+name: Aggregation Test
+
+on:
+ push:
+ branches: ["*"]
+ pull_request:
+ branches: ["*"]
+
+env:
+ CARGO_TERM_COLOR: always
+
+jobs:
+ build-and-test:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Pull mini-tree image
+ run: docker pull summadev/summa-aggregation-mini-tree:latest
+
+ - name: Run tests
+ run: cargo test --features docker
diff --git a/Cargo.lock b/Cargo.lock
index f432263..fbf3065 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -114,6 +114,21 @@ dependencies = [
"syn 2.0.39",
]
+[[package]]
+name = "android-tzdata"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
+
+[[package]]
+name = "android_system_properties"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "anyhow"
version = "1.0.75"
@@ -408,7 +423,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4"
dependencies = [
"block-padding",
- "generic-array",
+ "generic-array 0.14.7",
]
[[package]]
@@ -417,7 +432,7 @@ version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
dependencies = [
- "generic-array",
+ "generic-array 0.14.7",
]
[[package]]
@@ -454,6 +469,46 @@ dependencies = [
"subtle",
]
+[[package]]
+name = "bollard"
+version = "0.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f03db470b3c0213c47e978da93200259a1eb4dae2e5512cba9955e2b540a6fc6"
+dependencies = [
+ "base64 0.21.5",
+ "bollard-stubs",
+ "bytes",
+ "futures-core",
+ "futures-util",
+ "hex",
+ "http",
+ "hyper",
+ "hyperlocal",
+ "log",
+ "pin-project-lite",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "serde_repr",
+ "serde_urlencoded",
+ "thiserror",
+ "tokio",
+ "tokio-util",
+ "url",
+ "winapi",
+]
+
+[[package]]
+name = "bollard-stubs"
+version = "1.43.0-rc.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b58071e8fd9ec1e930efd28e3a90c1251015872a2ce49f81f36421b86466932e"
+dependencies = [
+ "serde",
+ "serde_repr",
+ "serde_with",
+]
+
[[package]]
name = "bs58"
version = "0.5.0"
@@ -502,18 +557,18 @@ dependencies = [
[[package]]
name = "cargo-platform"
-version = "0.1.4"
+version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12024c4645c97566567129c204f65d5815a8c9aecf30fcbe682b2fe034996d36"
+checksum = "e34637b3140142bdf929fb439e8aa4ebad7651ebf7b1080b3930aa16ac1459ff"
dependencies = [
"serde",
]
[[package]]
name = "cargo_metadata"
-version = "0.17.0"
+version = "0.18.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7daec1a2a2129eeba1644b220b4647ec537b0b5d4bfd6876fcc5a540056b592"
+checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037"
dependencies = [
"camino",
"cargo-platform",
@@ -544,7 +599,11 @@ version = "0.4.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38"
dependencies = [
+ "android-tzdata",
+ "iana-time-zone",
"num-traits",
+ "serde",
+ "windows-targets 0.48.5",
]
[[package]]
@@ -599,7 +658,7 @@ dependencies = [
"bech32",
"bs58",
"digest 0.10.7",
- "generic-array",
+ "generic-array 0.14.7",
"hex",
"ripemd",
"serde",
@@ -617,7 +676,7 @@ checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6"
dependencies = [
"is-terminal",
"lazy_static",
- "windows-sys",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -679,9 +738,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e"
[[package]]
name = "core-foundation"
-version = "0.9.3"
+version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146"
+checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
dependencies = [
"core-foundation-sys",
"libc",
@@ -689,9 +748,9 @@ dependencies = [
[[package]]
name = "core-foundation-sys"
-version = "0.8.4"
+version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"
+checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f"
[[package]]
name = "cpufeatures"
@@ -762,11 +821,11 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
[[package]]
name = "crypto-bigint"
-version = "0.5.4"
+version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28f85c3514d2a6e64160359b45a3918c3b4178bcbf4ae5d03ab2d02e521c479a"
+checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76"
dependencies = [
- "generic-array",
+ "generic-array 0.14.7",
"rand_core 0.6.4",
"subtle",
"zeroize",
@@ -778,7 +837,7 @@ version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
- "generic-array",
+ "generic-array 0.14.7",
"typenum",
]
@@ -824,11 +883,12 @@ dependencies = [
[[package]]
name = "deranged"
-version = "0.3.9"
+version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3"
+checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc"
dependencies = [
"powerfmt",
+ "serde",
]
[[package]]
@@ -856,7 +916,7 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
dependencies = [
- "generic-array",
+ "generic-array 0.14.7",
]
[[package]]
@@ -899,7 +959,7 @@ dependencies = [
"libc",
"option-ext",
"redox_users",
- "windows-sys",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -962,9 +1022,9 @@ dependencies = [
[[package]]
name = "ecdsa"
-version = "0.16.8"
+version = "0.16.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4"
+checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca"
dependencies = [
"der",
"digest 0.10.7",
@@ -982,15 +1042,15 @@ checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07"
[[package]]
name = "elliptic-curve"
-version = "0.13.6"
+version = "0.13.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d97ca172ae9dc9f9b779a6e3a65d308f2af74e5b8c921299075bdb4a0370e914"
+checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47"
dependencies = [
"base16ct",
"crypto-bigint",
"digest 0.10.7",
"ff",
- "generic-array",
+ "generic-array 0.14.7",
"group",
"pkcs8",
"rand_core 0.6.4",
@@ -1054,12 +1114,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "errno"
-version = "0.3.6"
+version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7c18ee0ed65a5f1f81cac6b1d213b69c35fa47d4252ad41f1486dbd8226fe36e"
+checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245"
dependencies = [
"libc",
- "windows-sys",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -1134,9 +1194,9 @@ dependencies = [
[[package]]
name = "ethers"
-version = "2.0.10"
+version = "2.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ad13497f6e0a24292fc7b408e30d22fe9dc262da1f40d7b542c3a44e7fc0476"
+checksum = "1a5344eea9b20effb5efeaad29418215c4d27017639fd1f908260f59cbbd226e"
dependencies = [
"ethers-addressbook",
"ethers-contract",
@@ -1150,9 +1210,9 @@ dependencies = [
[[package]]
name = "ethers-addressbook"
-version = "2.0.10"
+version = "2.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6e9e8acd0ed348403cc73a670c24daba3226c40b98dc1a41903766b3ab6240a"
+checksum = "8c405f24ea3a517899ba7985385c43dc4a7eb1209af3b1e0a1a32d7dcc7f8d09"
dependencies = [
"ethers-core",
"once_cell",
@@ -1162,9 +1222,9 @@ dependencies = [
[[package]]
name = "ethers-contract"
-version = "2.0.10"
+version = "2.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d79269278125006bb0552349c03593ffa9702112ca88bc7046cc669f148fb47c"
+checksum = "0111ead599d17a7bff6985fd5756f39ca7033edc79a31b23026a8d5d64fa95cd"
dependencies = [
"const-hex",
"ethers-contract-abigen",
@@ -1181,9 +1241,9 @@ dependencies = [
[[package]]
name = "ethers-contract-abigen"
-version = "2.0.10"
+version = "2.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce95a43c939b2e4e2f3191c5ad4a1f279780b8a39139c9905b43a7433531e2ab"
+checksum = "51258120c6b47ea9d9bec0d90f9e8af71c977fbefbef8213c91bfed385fe45eb"
dependencies = [
"Inflector",
"const-hex",
@@ -1203,9 +1263,9 @@ dependencies = [
[[package]]
name = "ethers-contract-derive"
-version = "2.0.10"
+version = "2.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e9ce44906fc871b3ee8c69a695ca7ec7f70e50cb379c9b9cb5e532269e492f6"
+checksum = "936e7a0f1197cee2b62dc89f63eff3201dbf87c283ff7e18d86d38f83b845483"
dependencies = [
"Inflector",
"const-hex",
@@ -1219,9 +1279,9 @@ dependencies = [
[[package]]
name = "ethers-core"
-version = "2.0.10"
+version = "2.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c0a17f0708692024db9956b31d7a20163607d2745953f5ae8125ab368ba280ad"
+checksum = "2f03e0bdc216eeb9e355b90cf610ef6c5bb8aca631f97b5ae9980ce34ea7878d"
dependencies = [
"arrayvec 0.7.4",
"bytes",
@@ -1230,7 +1290,7 @@ dependencies = [
"const-hex",
"elliptic-curve",
"ethabi",
- "generic-array",
+ "generic-array 0.14.7",
"k256",
"num_enum",
"once_cell",
@@ -1249,10 +1309,11 @@ dependencies = [
[[package]]
name = "ethers-etherscan"
-version = "2.0.10"
+version = "2.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e53451ea4a8128fbce33966da71132cf9e1040dcfd2a2084fd7733ada7b2045"
+checksum = "abbac2c890bdbe0f1b8e549a53b00e2c4c1de86bb077c1094d1f38cdf9381a56"
dependencies = [
+ "chrono",
"ethers-core",
"ethers-solc",
"reqwest",
@@ -1265,15 +1326,14 @@ dependencies = [
[[package]]
name = "ethers-middleware"
-version = "2.0.10"
+version = "2.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "473f1ccd0c793871bbc248729fa8df7e6d2981d6226e4343e3bbaa9281074d5d"
+checksum = "681ece6eb1d10f7cf4f873059a77c04ff1de4f35c63dd7bccde8f438374fcb93"
dependencies = [
"async-trait",
"auto_impl",
"ethers-contract",
"ethers-core",
- "ethers-etherscan",
"ethers-providers",
"ethers-signers",
"futures-channel",
@@ -1292,9 +1352,9 @@ dependencies = [
[[package]]
name = "ethers-providers"
-version = "2.0.10"
+version = "2.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6838fa110e57d572336178b7c79e94ff88ef976306852d8cb87d9e5b1fc7c0b5"
+checksum = "25d6c0c9455d93d4990c06e049abf9b30daf148cf461ee939c11d88907c60816"
dependencies = [
"async-trait",
"auto_impl",
@@ -1328,9 +1388,9 @@ dependencies = [
[[package]]
name = "ethers-signers"
-version = "2.0.10"
+version = "2.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ea44bec930f12292866166f9ddbea6aa76304850e4d8dcd66dc492b43d00ff1"
+checksum = "0cb1b714e227bbd2d8c53528adb580b203009728b17d0d0e4119353aa9bc5532"
dependencies = [
"async-trait",
"coins-bip32",
@@ -1347,9 +1407,9 @@ dependencies = [
[[package]]
name = "ethers-solc"
-version = "2.0.10"
+version = "2.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "de34e484e7ae3cab99fbfd013d6c5dc7f9013676a4e0e414d8b12e1213e8b3ba"
+checksum = "a64f710586d147864cff66540a6d64518b9ff37d73ef827fee430538265b595f"
dependencies = [
"cfg-if",
"const-hex",
@@ -1378,29 +1438,29 @@ dependencies = [
[[package]]
name = "execute"
-version = "0.2.12"
+version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16d9a9ea4c04632c16bc5c71a2fcc63d308481f7fc67eb1a1ce6315c44a426ae"
+checksum = "3a82608ee96ce76aeab659e9b8d3c2b787bffd223199af88c674923d861ada10"
dependencies = [
"execute-command-macro",
"execute-command-tokens",
- "generic-array",
+ "generic-array 1.0.0",
]
[[package]]
name = "execute-command-macro"
-version = "0.1.8"
+version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a5fbc65a0cf735106743f4c38c9a3671c1e734b5c2c20d21a3c93c696daa3157"
+checksum = "90dec53d547564e911dc4ff3ecb726a64cf41a6fa01a2370ebc0d95175dd08bd"
dependencies = [
"execute-command-macro-impl",
]
[[package]]
name = "execute-command-macro-impl"
-version = "0.1.9"
+version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "55a9a55d1dab3b07854648d48e366f684aefe2ac78ae28cec3bf65e3cd53d9a3"
+checksum = "ce8cd46a041ad005ab9c71263f9a0ff5b529eac0fe4cc9b4a20f4f0765d8cf4b"
dependencies = [
"execute-command-tokens",
"quote",
@@ -1409,15 +1469,15 @@ dependencies = [
[[package]]
name = "execute-command-tokens"
-version = "0.1.6"
+version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ba569491c70ec8471e34aa7e9c0b9e82bb5d2464c0398442d17d3c4af814e5a"
+checksum = "69dc321eb6be977f44674620ca3aa21703cb20ffbe560e1ae97da08401ffbcad"
[[package]]
name = "eyre"
-version = "0.6.8"
+version = "0.6.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c2b6b5a29c02cdc822728b7d7b8ae1bab3e3b05d44522770ddd49722eeac7eb"
+checksum = "80f656be11ddf91bd709454d15d5bd896fbaf4cc3314e69349e4d1569f5b46cd"
dependencies = [
"indenter",
"once_cell",
@@ -1544,9 +1604,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "form_urlencoded"
-version = "1.2.0"
+version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652"
+checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
dependencies = [
"percent-encoding",
]
@@ -1692,6 +1752,15 @@ dependencies = [
"zeroize",
]
+[[package]]
+name = "generic-array"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fe739944a5406424e080edccb6add95685130b9f160d5407c639c7df0c5836b0"
+dependencies = [
+ "typenum",
+]
+
[[package]]
name = "getrandom"
version = "0.2.11"
@@ -1707,9 +1776,9 @@ dependencies = [
[[package]]
name = "gimli"
-version = "0.28.0"
+version = "0.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
+checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
[[package]]
name = "glob"
@@ -1744,9 +1813,9 @@ dependencies = [
[[package]]
name = "h2"
-version = "0.3.21"
+version = "0.3.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833"
+checksum = "4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178"
dependencies = [
"bytes",
"fnv",
@@ -1754,7 +1823,7 @@ dependencies = [
"futures-sink",
"futures-util",
"http",
- "indexmap 1.9.3",
+ "indexmap 2.1.0",
"slab",
"tokio",
"tokio-util",
@@ -1854,9 +1923,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
[[package]]
name = "hashbrown"
-version = "0.14.2"
+version = "0.14.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156"
+checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
dependencies = [
"ahash",
"allocator-api2",
@@ -1919,14 +1988,14 @@ version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb"
dependencies = [
- "windows-sys",
+ "windows-sys 0.48.0",
]
[[package]]
name = "http"
-version = "0.2.9"
+version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482"
+checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb"
dependencies = [
"bytes",
"fnv",
@@ -1993,11 +2062,47 @@ dependencies = [
"tokio-native-tls",
]
+[[package]]
+name = "hyperlocal"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fafdf7b2b2de7c9784f76e02c0935e65a8117ec3b768644379983ab333ac98c"
+dependencies = [
+ "futures-util",
+ "hex",
+ "hyper",
+ "pin-project",
+ "tokio",
+]
+
+[[package]]
+name = "iana-time-zone"
+version = "0.1.58"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20"
+dependencies = [
+ "android_system_properties",
+ "core-foundation-sys",
+ "iana-time-zone-haiku",
+ "js-sys",
+ "wasm-bindgen",
+ "windows-core",
+]
+
+[[package]]
+name = "iana-time-zone-haiku"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
+dependencies = [
+ "cc",
+]
+
[[package]]
name = "idna"
-version = "0.4.0"
+version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
+checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6"
dependencies = [
"unicode-bidi",
"unicode-normalization",
@@ -2055,6 +2160,7 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
dependencies = [
"autocfg",
"hashbrown 0.12.3",
+ "serde",
]
[[package]]
@@ -2064,7 +2170,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f"
dependencies = [
"equivalent",
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
+ "serde",
]
[[package]]
@@ -2073,7 +2180,7 @@ version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5"
dependencies = [
- "generic-array",
+ "generic-array 0.14.7",
]
[[package]]
@@ -2112,7 +2219,7 @@ checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
dependencies = [
"hermit-abi",
"rustix",
- "windows-sys",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -2159,9 +2266,9 @@ checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
[[package]]
name = "js-sys"
-version = "0.3.65"
+version = "0.3.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8"
+checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca"
dependencies = [
"wasm-bindgen",
]
@@ -2182,9 +2289,9 @@ dependencies = [
[[package]]
name = "k256"
-version = "0.13.1"
+version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc"
+checksum = "3f01b677d82ef7a676aa37e099defd83a28e15687112cafdd112d60236b6115b"
dependencies = [
"cfg-if",
"ecdsa",
@@ -2265,9 +2372,9 @@ dependencies = [
[[package]]
name = "linux-raw-sys"
-version = "0.4.11"
+version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829"
+checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456"
[[package]]
name = "lock_api"
@@ -2361,7 +2468,7 @@ checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0"
dependencies = [
"libc",
"wasi",
- "windows-sys",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -2393,7 +2500,7 @@ dependencies = [
"blstrs",
"byteorder",
"ff",
- "generic-array",
+ "generic-array 0.14.7",
"itertools 0.8.2",
"log",
"pasta_curves",
@@ -2444,7 +2551,7 @@ dependencies = [
"digest 0.10.7",
"ff",
"flate2",
- "generic-array",
+ "generic-array 0.14.7",
"getrandom",
"halo2curves 0.1.0",
"itertools 0.11.0",
@@ -2643,9 +2750,9 @@ dependencies = [
[[package]]
name = "openssl"
-version = "0.10.59"
+version = "0.10.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a257ad03cd8fb16ad4172fedf8094451e1af1c4b70097636ef2eac9a5f0cc33"
+checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800"
dependencies = [
"bitflags 2.4.1",
"cfg-if",
@@ -2675,9 +2782,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
[[package]]
name = "openssl-sys"
-version = "0.9.95"
+version = "0.9.96"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9"
+checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f"
dependencies = [
"cc",
"libc",
@@ -2702,9 +2809,9 @@ dependencies = [
[[package]]
name = "parity-scale-codec"
-version = "3.6.5"
+version = "3.6.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb"
+checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe"
dependencies = [
"arrayvec 0.7.4",
"bitvec",
@@ -2716,11 +2823,11 @@ dependencies = [
[[package]]
name = "parity-scale-codec-derive"
-version = "3.6.5"
+version = "3.6.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260"
+checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b"
dependencies = [
- "proc-macro-crate 1.3.1",
+ "proc-macro-crate 2.0.0",
"proc-macro2",
"quote",
"syn 1.0.109",
@@ -2746,7 +2853,7 @@ dependencies = [
"libc",
"redox_syscall",
"smallvec",
- "windows-targets",
+ "windows-targets 0.48.5",
]
[[package]]
@@ -2821,9 +2928,9 @@ dependencies = [
[[package]]
name = "percent-encoding"
-version = "2.3.0"
+version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"
+checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
[[package]]
name = "petgraph"
@@ -3050,9 +3157,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
-version = "1.0.69"
+version = "1.0.70"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da"
+checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b"
dependencies = [
"unicode-ident",
]
@@ -3324,7 +3431,7 @@ dependencies = [
"bitflags 2.4.1",
"bitvec",
"enumn",
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
"hex",
]
@@ -3386,9 +3493,9 @@ dependencies = [
[[package]]
name = "ruint"
-version = "1.11.0"
+version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "724fd11728a3804e9944b14cab63825024c40bf42f8af87c8b5d97c4bbacf426"
+checksum = "608a5726529f2f0ef81b8fde9873c4bb829d6b5b5ca6be4d97345ddf0749c825"
dependencies = [
"alloy-rlp",
"proptest",
@@ -3428,15 +3535,15 @@ dependencies = [
[[package]]
name = "rustix"
-version = "0.38.21"
+version = "0.38.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3"
+checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a"
dependencies = [
"bitflags 2.4.1",
"errno",
"libc",
"linux-raw-sys",
- "windows-sys",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -3499,7 +3606,7 @@ version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88"
dependencies = [
- "windows-sys",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -3528,7 +3635,7 @@ checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc"
dependencies = [
"base16ct",
"der",
- "generic-array",
+ "generic-array 0.14.7",
"pkcs8",
"subtle",
"zeroize",
@@ -3590,9 +3697,9 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73"
[[package]]
name = "serde"
-version = "1.0.192"
+version = "1.0.193"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001"
+checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89"
dependencies = [
"serde_derive",
]
@@ -3608,9 +3715,9 @@ dependencies = [
[[package]]
name = "serde_derive"
-version = "1.0.192"
+version = "1.0.193"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1"
+checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3"
dependencies = [
"proc-macro2",
"quote",
@@ -3638,6 +3745,17 @@ dependencies = [
"serde",
]
+[[package]]
+name = "serde_repr"
+version = "0.1.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3081f5ffbb02284dda55132aa26daecedd7372a42417bbbab6f14ab7d6bb9145"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.39",
+]
+
[[package]]
name = "serde_spanned"
version = "0.6.4"
@@ -3659,6 +3777,35 @@ dependencies = [
"serde",
]
+[[package]]
+name = "serde_with"
+version = "3.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "64cd236ccc1b7a29e7e2739f27c0b2dd199804abc4290e32f59f3b68d6405c23"
+dependencies = [
+ "base64 0.21.5",
+ "chrono",
+ "hex",
+ "indexmap 1.9.3",
+ "indexmap 2.1.0",
+ "serde",
+ "serde_json",
+ "time",
+]
+
+[[package]]
+name = "serde_yaml"
+version = "0.9.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3cc7a1570e38322cfe4154732e5110f887ea57e22b76f4bfd32b5bdd3368666c"
+dependencies = [
+ "indexmap 2.1.0",
+ "itoa",
+ "ryu",
+ "serde",
+ "unsafe-libyaml",
+]
+
[[package]]
name = "sha2"
version = "0.10.8"
@@ -3703,9 +3850,9 @@ dependencies = [
[[package]]
name = "signature"
-version = "2.1.0"
+version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500"
+checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de"
dependencies = [
"digest 0.10.7",
"rand_core 0.6.4",
@@ -3747,7 +3894,7 @@ checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970"
[[package]]
name = "snark-verifier"
version = "0.1.1"
-source = "git+https://github.com/privacy-scaling-explorations/snark-verifier#fedd7a8ffa44082c20a63e101484e0991ee7cce0"
+source = "git+https://github.com/privacy-scaling-explorations/snark-verifier#9feead7d4dbad951e6aa1d572230b1c098ec8040"
dependencies = [
"ecc",
"halo2_proofs",
@@ -3768,7 +3915,7 @@ dependencies = [
[[package]]
name = "snark-verifier-sdk"
version = "0.1.2"
-source = "git+https://github.com/privacy-scaling-explorations/snark-verifier#fedd7a8ffa44082c20a63e101484e0991ee7cce0"
+source = "git+https://github.com/privacy-scaling-explorations/snark-verifier#9feead7d4dbad951e6aa1d572230b1c098ec8040"
dependencies = [
"ark-std",
"bincode",
@@ -3807,14 +3954,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9"
dependencies = [
"libc",
- "windows-sys",
+ "windows-sys 0.48.0",
]
[[package]]
name = "solang-parser"
-version = "0.3.2"
+version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7cb9fa2fa2fa6837be8a2495486ff92e3ffe68a99b6eeba288e139efdd842457"
+checksum = "c425ce1c59f4b154717592f0bdf4715c3a1d55058883622d3157e1f0908a5b26"
dependencies = [
"itertools 0.11.0",
"lalrpop",
@@ -3841,9 +3988,9 @@ dependencies = [
[[package]]
name = "spki"
-version = "0.7.2"
+version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a"
+checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d"
dependencies = [
"base64ct",
"der",
@@ -3930,20 +4077,26 @@ name = "summa-aggregation"
version = "0.1.0"
dependencies = [
"axum",
+ "bollard",
"const_env",
+ "csv",
+ "futures",
"halo2_proofs",
"num-bigint 0.4.4",
"rand 0.8.5",
+ "reqwest",
"serde",
"serde_json",
+ "serde_yaml",
"summa-backend",
"tokio",
+ "tokio-util",
]
[[package]]
name = "summa-backend"
version = "0.1.0"
-source = "git+https://github.com/summa-dev/summa-solvency?branch=v1-improvements-and-consolidation#8ab0b07587ced37d341266a73c187bb22e03560e"
+source = "git+https://github.com/summa-dev/summa-solvency?branch=v1-improvements-and-consolidation#89c228dee2c41b9592016789717446c71ff06bec"
dependencies = [
"base64 0.13.1",
"bincode",
@@ -3952,6 +4105,7 @@ dependencies = [
"futures",
"halo2_proofs",
"num-bigint 0.4.4",
+ "num-traits",
"reqwest",
"serde",
"serde_json",
@@ -3963,7 +4117,7 @@ dependencies = [
[[package]]
name = "summa-solvency"
version = "0.1.0"
-source = "git+https://github.com/summa-dev/summa-solvency?branch=v1-improvements-and-consolidation#8ab0b07587ced37d341266a73c187bb22e03560e"
+source = "git+https://github.com/summa-dev/summa-solvency?branch=v1-improvements-and-consolidation#89c228dee2c41b9592016789717446c71ff06bec"
dependencies = [
"ark-std",
"csv",
@@ -4053,7 +4207,7 @@ dependencies = [
"fastrand",
"redox_syscall",
"rustix",
- "windows-sys",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -4165,7 +4319,7 @@ dependencies = [
"signal-hook-registry",
"socket2 0.5.5",
"tokio-macros",
- "windows-sys",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -4205,14 +4359,14 @@ dependencies = [
[[package]]
name = "toml"
-version = "0.7.8"
+version = "0.8.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257"
+checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35"
dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
- "toml_edit 0.19.15",
+ "toml_edit 0.21.0",
]
[[package]]
@@ -4231,8 +4385,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
dependencies = [
"indexmap 2.1.0",
- "serde",
- "serde_spanned",
"toml_datetime",
"winnow",
]
@@ -4248,6 +4400,19 @@ dependencies = [
"winnow",
]
+[[package]]
+name = "toml_edit"
+version = "0.21.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03"
+dependencies = [
+ "indexmap 2.1.0",
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+ "winnow",
+]
+
[[package]]
name = "tower"
version = "0.4.13"
@@ -4386,6 +4551,12 @@ version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
+[[package]]
+name = "unsafe-libyaml"
+version = "0.2.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f28467d3e1d3c6586d8f25fa243f544f5800fec42d97032474e17222c2b75cfa"
+
[[package]]
name = "untrusted"
version = "0.7.1"
@@ -4394,9 +4565,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
[[package]]
name = "url"
-version = "2.4.1"
+version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5"
+checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633"
dependencies = [
"form_urlencoded",
"idna",
@@ -4458,9 +4629,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
-version = "0.2.88"
+version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce"
+checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e"
dependencies = [
"cfg-if",
"serde",
@@ -4470,9 +4641,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
-version = "0.2.88"
+version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217"
+checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826"
dependencies = [
"bumpalo",
"log",
@@ -4485,9 +4656,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-futures"
-version = "0.4.38"
+version = "0.4.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9afec9963e3d0994cac82455b2b3502b81a7f40f9a0d32181f7528d9f4b43e02"
+checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12"
dependencies = [
"cfg-if",
"js-sys",
@@ -4497,9 +4668,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
-version = "0.2.88"
+version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2"
+checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@@ -4507,9 +4678,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
-version = "0.2.88"
+version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907"
+checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283"
dependencies = [
"proc-macro2",
"quote",
@@ -4532,15 +4703,15 @@ dependencies = [
[[package]]
name = "wasm-bindgen-shared"
-version = "0.2.88"
+version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b"
+checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f"
[[package]]
name = "web-sys"
-version = "0.3.65"
+version = "0.3.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85"
+checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f"
dependencies = [
"js-sys",
"wasm-bindgen",
@@ -4589,13 +4760,31 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+[[package]]
+name = "windows-core"
+version = "0.51.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64"
+dependencies = [
+ "windows-targets 0.48.5",
+]
+
[[package]]
name = "windows-sys"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
dependencies = [
- "windows-targets",
+ "windows-targets 0.48.5",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets 0.52.0",
]
[[package]]
@@ -4604,13 +4793,28 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
dependencies = [
- "windows_aarch64_gnullvm",
- "windows_aarch64_msvc",
- "windows_i686_gnu",
- "windows_i686_msvc",
- "windows_x86_64_gnu",
- "windows_x86_64_gnullvm",
- "windows_x86_64_msvc",
+ "windows_aarch64_gnullvm 0.48.5",
+ "windows_aarch64_msvc 0.48.5",
+ "windows_i686_gnu 0.48.5",
+ "windows_i686_msvc 0.48.5",
+ "windows_x86_64_gnu 0.48.5",
+ "windows_x86_64_gnullvm 0.48.5",
+ "windows_x86_64_msvc 0.48.5",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
+dependencies = [
+ "windows_aarch64_gnullvm 0.52.0",
+ "windows_aarch64_msvc 0.52.0",
+ "windows_i686_gnu 0.52.0",
+ "windows_i686_msvc 0.52.0",
+ "windows_x86_64_gnu 0.52.0",
+ "windows_x86_64_gnullvm 0.52.0",
+ "windows_x86_64_msvc 0.52.0",
]
[[package]]
@@ -4619,42 +4823,84 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
+
[[package]]
name = "windows_aarch64_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
+
[[package]]
name = "windows_i686_gnu"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
+
[[package]]
name = "windows_i686_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
+
[[package]]
name = "windows_x86_64_gnu"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
+
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
+
[[package]]
name = "windows_x86_64_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
+
[[package]]
name = "winnow"
version = "0.5.19"
@@ -4671,7 +4917,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
dependencies = [
"cfg-if",
- "windows-sys",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -4720,18 +4966,18 @@ dependencies = [
[[package]]
name = "zerocopy"
-version = "0.7.25"
+version = "0.7.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8cd369a67c0edfef15010f980c3cbe45d7f651deac2cd67ce097cd801de16557"
+checksum = "7d6f15f7ade05d2a4935e34a457b936c23dc70a05cc1d97133dc99e7a3fe0f0e"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
-version = "0.7.25"
+version = "0.7.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2f140bda219a26ccc0cdb03dba58af72590c53b22642577d88a927bc5c87d6b"
+checksum = "dbbad221e3f78500350ecbd7dfa4e63ef945c05f4c61cb7f4d3f84cd0bba649b"
dependencies = [
"proc-macro2",
"quote",
@@ -4740,9 +4986,9 @@ dependencies = [
[[package]]
name = "zeroize"
-version = "1.6.0"
+version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
+checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d"
dependencies = [
"zeroize_derive",
]
diff --git a/Cargo.toml b/Cargo.toml
index 7883a98..657c5ca 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -11,11 +11,21 @@ const_env = "0.1.2"
num-bigint = "0.4.4"
serde = { version = "1.0.192", features = ["derive"] }
serde_json = "1.0.108"
-summa-backend = { git = "https://github.com/summa-dev/summa-solvency", branch = "v1-improvements-and-consolidation", version = "0.1.0" }
+summa-backend = { git = "https://github.com/summa-dev/summa-solvency", branch = "v1-improvements-and-consolidation" }
halo2_proofs = { git = "https://github.com/privacy-scaling-explorations/halo2", tag = "v2023_04_20"}
tokio = { version = "1.34.0", features = ["full"] }
-rand = "0.8"
+reqwest = "0.11.22"
+csv = "1.3.0"
+rand = "0.8.5"
+futures = "0.3.29"
+bollard = "0.15.0"
+tokio-util = "0.7.10"
+serde_yaml = "0.9.27"
[[bin]]
name = "mini-tree-server"
path = "bin/mini_tree_server.rs"
+
+[features]
+docker = []
+docker-swarm = []
diff --git a/Orchestrator-diagram.png b/Orchestrator-diagram.png
new file mode 100644
index 0000000..b4c807f
Binary files /dev/null and b/Orchestrator-diagram.png differ
diff --git a/README.md b/README.md
index d00879f..065bc9a 100644
--- a/README.md
+++ b/README.md
@@ -1,89 +1,90 @@
# Summa Aggregation
-## Orchestrator
-
-WIP
-
-## Mini Tree Generator
-
-- Build the Image
-
- To build the image, run the following command:
- ```
- docker build . -t summa-aggregation/mini-tree
- ```
-
-- Run the `Mini Tree Generator Container`
-
- Use the command below to start the Mini Tree Generator container:
-
- ```
- docker run -d -p 4000:4000 --name mini-tree-generator summa-aggretaion/mini-tree
- ```
-
-- Test with a Script
-
- To test, execute the provided script that send two `Entry` data to server:
- ```
- bash ./scripts/test_sending_entry.sh
- ```
-
- Upon successful execution, you will receive a response similar to the following
- (JSON output is prettified for clarity):
- ```Json
- {
- "root": {
- "hash": "0x2a4a7ae82b45b3800bdcd6364409e7ba9cac3d4598c546bd48952c234b5d2fb9",
- "balances": [
- "0x000000000000000000000000000000000000000000000000000000000001375f",
- "0x000000000000000000000000000000000000000000000000000000000000e9a6"
- ]
- },
- "nodes": [
- [
- {
- "hash": "0x0e113acd03b98f0bab0ef6f577245d5d008cbcc19ef2dab3608aa4f37f72a407",
- "balances": [
- "0x0000000000000000000000000000000000000000000000000000000000002e70",
- "0x000000000000000000000000000000000000000000000000000000000000a0cb"
- ]
- },
- {
- "hash": "0x17ef9d8ee0e2c8470814651413b71009a607a020214f749687384a7b7a7eb67a",
- "balances": [
- "0x00000000000000000000000000000000000000000000000000000000000108ef",
- "0x00000000000000000000000000000000000000000000000000000000000048db"
- ]
- }
- ],
- [
- {
- "hash": "0x2a4a7ae82b45b3800bdcd6364409e7ba9cac3d4598c546bd48952c234b5d2fb9",
- "balances": [
- "0x000000000000000000000000000000000000000000000000000000000001375f",
- "0x000000000000000000000000000000000000000000000000000000000000e9a6"
- ]
- }
- ]
- ],
- "depth": 1,
- "entries": [
- {
- "balances": [
- "11888",
- "41163"
- ],
- "username": "dxGaEAii"
- },
- {
- "balances": [
- "67823",
- "18651"
- ],
- "username": "MBlfbBGI"
- }
- ],
- "is_sorted": false
- }
- ```
+Summa Aggregation is a scalable solution specifically designed to accelerate the process of building Merkle sum trees. It addresses the time-intensive challenge of constructing these trees by enabling efficient scaling through parallelization and distributed computation across multiple machines.
+## Running test
+
+Tests can be run using the following command:
+
+```bash
+cargo test --release
+```
+
+Note: The Worker runs locally and uses port 4000 as the default for its server.
+Please ensure that this port is not already in use to avoid errors.
+
+## Running Additional Tests Involving Docker and Docker Swarm
+
+For additional tests involving Docker and Docker Swarm mode, the presence of the "summadev/summa-aggregation-mini-tree" image in the local Docker registry is required. Please refer to the [Mini Tree Server](bin/README.md) for more information about the mini tree.
+
+### Building the docker image
+
+Build the image using the following command:
+
+```bash
+docker build . -t summadev/summa-aggregation-mini-tree
+```
+
+### Downloading the Docker Image
+
+Alternatively, the image can be downloaded from Docker Hub:
+
+```bash
+docker pull summadev/summa-aggregation-mini-tree
+```
+
+### Testing with LocalSpawner
+
+The following command runs an additional test case using the LocalSpawner, which spawns worker containers in the local Docker environment. This extra test case involves running two containers during the testing process:
+
+```bash
+cargo test --features docker
+```
+
+### Testing with CloudSpawner
+
+For Summa-Aggregation, it's necessary to prepare a distributed environment where Workers can operate on remote machines, referred to as 'Nodes'. For guidance on setting up swarm nodes, please see [Getting Started with swarm mode](https://docs.docker.com/engine/swarm/swarm-tutorial)
+
+When the Docker environment is running successfully in Swarm mode, an additional test case that spawns workers on Swarm nodes using the `CloudSpawner` can be run:
+
+```bash
+cargo test --features docker-swarm
+```
+
+It is critical to ensure that the Docker Swarm includes at least one node connected to the manager node. Additionally, each worker node in the swarm must have the "summadev/summa-aggregation-mini-tree" image in its Docker registry. Without this image on nodes connected to the manager node, spawning workers on that node is not possible.
+
+## Summa Aggregation Example
+
+This example demonstrates the setup and operation of a distributed environment using Summa Aggregation, including initializing the round and generating an inclusion proof. A notable aspect of this demonstration is that the AggregationMerkleSumTree supports the generation of inclusion proofs in the same way as the MerkleSumTree.
+
+### 1. Setup Distributed Environment
+
+Custodians can leverage any cloud infrastructure to establish worker nodes. In this example, we use two local servers running mini-tree services as workers, rather than deploying worker containers on remote nodes.
+
+Key steps:
+
+- **Spawning Worker Nodes**: Two local servers are spawned, each running a mini-tree service.
+
+- **Worker URLs**: It is crucial to ensure the number of worker URLs matches the number of executors. In this example, we use `127.0.0.1:4000` and `127.0.0.1:4001`.
+
+### 2. Initialize the Round with Aggregation Merkle Sum Tree
+
+Initiating the round with an `AggregationMerkleSumTree` is a key step after setting up the distributed environment with worker nodes. This process involves the `Orchestrator` and the `Round`.
+
+- **Orchestrator and AggregationMerkleSumTree**: The `Orchestrator` is initialized with the `CloudSpawner` and paths to the CSV files containing entry data. It uses this information to generate the `AggregationMerkleSumTree`, which forms the basis for the round's operations.
+
+- **Round Initialization**: Subsequently, the `Round` is initialized using the aggregation merkle sum tree. The `Round` is integral for interactions with the Summa contract and relies on the setup performed by the `Orchestrator`.
+
+### 3. Interact with the Summa Contract and Generate Proof of Inclusion
+
+The current example only demonstrates the creation of an inclusion proof.
+
+For detailed information on interaction patterns similar to those in the `summa-backend` example, refer to the ['summa_solvency_flow'](https://github.com/summa-dev/summa-solvency/blob/master/backend/examples/summa_solvency_flow.rs).
+
+### Example Execution
+
+Run the example using the following command:
+
+```bash
+cargo run --release --example aggregation_flow
+```
diff --git a/bin/README.md b/bin/README.md
new file mode 100644
index 0000000..e252863
--- /dev/null
+++ b/bin/README.md
@@ -0,0 +1,77 @@
+# Mini Tree Server
+
+Mini Tree Server is an Axum-based server that encapsulates the functionality of the Mini Tree Generator.
+
+## Test Mini Tree Server
+
+First, to start the Mini Tree Server, use the command:
+
+```bash
+ cargo run --release --bin mini-tree-server
+```
+
+Alternatively, if you have the summa-aggregation-mini-tree image locally, you can run the server with this command:
+
+ ```bash
+ docker run -d -p 4000:4000 --name mini-tree-server-test summadev/summa-aggregation-mini-tree
+ ```
+
+For details on obtaining the summadev/summa-aggregation-mini-tree image, please refer to the [Building Image](../README.md#building-the-docker-image) and [Downloading Image](../README.md#downloading-the-docker-image) sections in the README.
+
+Second, to send entries to the Mini Tree Server, execute the following script:
+
+ ```bash
+ bash ./scripts/test_sending_entry.sh
+ ```
+
+Note: Execute this command from the project's root folder to ensure proper functioning of scripts.
+
+Upon successful execution, you will receive a response similar to the following:
+
+Click View Response
+
+```Json
+{
+ "root": {
+ "hash": "0x2a4a7ae82b45b3800bdcd6364409e7ba9cac3d4598c546bd48952c234b5d2fb9",
+ "balances": [
+ "0x000000000000000000000000000000000000000000000000000000000001375f",
+ "0x000000000000000000000000000000000000000000000000000000000000e9a6"
+ ]
+ },
+ "nodes": [
+ [
+ {
+ "hash": "0x0e113acd03b98f0bab0ef6f577245d5d008cbcc19ef2dab3608aa4f37f72a407",
+ "balances": [
+ "0x0000000000000000000000000000000000000000000000000000000000002e70",
+ "0x000000000000000000000000000000000000000000000000000000000000a0cb"
+ ]
+ },
+ {
+ "hash": "0x17ef9d8ee0e2c8470814651413b71009a607a020214f749687384a7b7a7eb67a",
+ "balances": [
+ "0x00000000000000000000000000000000000000000000000000000000000108ef",
+ "0x00000000000000000000000000000000000000000000000000000000000048db"
+ ]
+ }
+ ],
+ [
+ {
+ "hash": "0x2a4a7ae82b45b3800bdcd6364409e7ba9cac3d4598c546bd48952c234b5d2fb9",
+ "balances": [
+ "0x000000000000000000000000000000000000000000000000000000000001375f",
+ "0x000000000000000000000000000000000000000000000000000000000000e9a6"
+ ]
+ }
+ ]
+ ],
+ "depth": 1,
+ "is_sorted": false
+}
+```
+
+this JSON output is prettified for clarity
+
+
+
diff --git a/bin/mini_tree_server.rs b/bin/mini_tree_server.rs
index 709e4d2..ee67b05 100644
--- a/bin/mini_tree_server.rs
+++ b/bin/mini_tree_server.rs
@@ -1,36 +1,7 @@
-use axum::{extract::Json, http::StatusCode, response::IntoResponse, routing::post, Router};
-use const_env::from_env;
-use num_bigint::BigUint;
+use axum::{routing::post, Router};
use std::net::SocketAddr;
-use summa_backend::merkle_sum_tree::{Entry, MerkleSumTree, Node, Tree};
-use serde::{Deserialize, Serialize};
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct JsonNode {
- pub hash: String,
- pub balances: Vec,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct JsonEntry {
- balances: Vec,
- username: String,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct JsonMerkleSumTree {
- root: JsonNode,
- nodes: Vec>,
- depth: usize,
- entries: Vec,
- is_sorted: bool,
-}
-
-#[from_env]
-const N_ASSETS: usize = 2;
-#[from_env]
-const N_BYTES: usize = 14;
+use summa_aggregation::mini_tree_generator::create_mst;
#[tokio::main]
async fn main() {
@@ -38,7 +9,7 @@ async fn main() {
let app = Router::new().route("/", post(create_mst));
// Define the address to serve on
- let addr = SocketAddr::from(([0, 0, 0, 0], 4000)); // TODO: assign ports from env variable
+ let addr = SocketAddr::from(([0, 0, 0, 0], 4000));
// Start the server
axum::Server::bind(&addr)
@@ -46,51 +17,3 @@ async fn main() {
.await
.unwrap();
}
-
-fn convert_node_to_json(node: &Node) -> JsonNode {
- JsonNode {
- hash: format!("{:?}", node.hash),
- balances: node.balances.iter().map(|b| format!("{:?}", b)).collect(),
- }
-}
-
-async fn create_mst(
- Json(json_entries): Json>,
-) -> Result)> {
- // Convert `JsonEntry` -> `Entry`
- let entries = json_entries
- .iter()
- .map(|entry| {
- let mut balances: [BigUint; N_ASSETS] = std::array::from_fn(|_| BigUint::from(0u32));
- entry.balances.iter().enumerate().for_each(|(i, balance)| {
- balances[i] = balance.parse::().unwrap();
- });
- Entry::new(entry.username.clone(), balances).unwrap()
- })
- .collect::>>();
-
- // Create `MerkleSumTree` from `parsed_entries`
- let tree = MerkleSumTree::::from_entries(entries, false).unwrap();
-
- // Convert `MerkleSumTree` to `JsonMerkleSumTree`
- let json_tree = JsonMerkleSumTree {
- root: convert_node_to_json(&tree.root()),
- nodes: tree
- .nodes()
- .iter()
- .map(|layer| layer.iter().map(convert_node_to_json).collect())
- .collect(),
- depth: tree.depth().clone(),
- entries: tree
- .entries()
- .iter()
- .map(|entry| JsonEntry {
- balances: entry.balances().iter().map(|b| b.to_string()).collect(),
- username: entry.username().to_string(),
- })
- .collect(),
- is_sorted: false, // TODO: assign from request data
- };
-
- Ok((StatusCode::OK, Json(json_tree)))
-}
diff --git a/csv/entry_16.csv b/csv/entry_16.csv
new file mode 100644
index 0000000..83b2d1d
--- /dev/null
+++ b/csv/entry_16.csv
@@ -0,0 +1,17 @@
+username,balance_ETH_ETH,balance_USDT_ETH
+dxGaEAii,11888,41163
+MBlfbBGI,67823,18651
+lAhWlEWZ,18651,2087
+nuZweYtO,22073,55683
+gbdSwiuY,34897,83296
+RZNneNuP,83296,16881
+YsscHXkp,31699,35479
+RkLzkDun,2087,79731
+HlQlnEYI,30605,11888
+RqkZOFYe,16881,14874
+NjCSRAfD,41163,67823
+pHniJMQY,14874,22073
+dOGIMzKR,10032,10032
+HfMDmNLp,55683,34897
+xPLKzCBl,79731,30605
+AtwIxZHo,35479,31699
diff --git a/csv/entry_16_1.csv b/csv/entry_16_1.csv
new file mode 100644
index 0000000..83b2d1d
--- /dev/null
+++ b/csv/entry_16_1.csv
@@ -0,0 +1,17 @@
+username,balance_ETH_ETH,balance_USDT_ETH
+dxGaEAii,11888,41163
+MBlfbBGI,67823,18651
+lAhWlEWZ,18651,2087
+nuZweYtO,22073,55683
+gbdSwiuY,34897,83296
+RZNneNuP,83296,16881
+YsscHXkp,31699,35479
+RkLzkDun,2087,79731
+HlQlnEYI,30605,11888
+RqkZOFYe,16881,14874
+NjCSRAfD,41163,67823
+pHniJMQY,14874,22073
+dOGIMzKR,10032,10032
+HfMDmNLp,55683,34897
+xPLKzCBl,79731,30605
+AtwIxZHo,35479,31699
diff --git a/csv/entry_16_2.csv b/csv/entry_16_2.csv
new file mode 100644
index 0000000..273c579
--- /dev/null
+++ b/csv/entry_16_2.csv
@@ -0,0 +1,17 @@
+username,balance_ETH_ETH,balance_USDT_ETH
+aaGaEAaa,11888,41163
+bblfbBGI,67823,18651
+cchWlEWZ,18651,2087
+ddZweYtO,22073,55683
+eedSwiuY,34897,83296
+ffNneNuP,83296,16881
+ggscHXkp,31699,35479
+hhLzkDun,2087,79731
+iiQlnEYI,30605,11888
+llkZOFYe,16881,14874
+mmCSRAfD,41163,67823
+nnniJMQY,14874,22073
+ooGIMzKR,10032,10032
+ppMDmNLp,55683,34897
+qqLKzCBl,79731,30605
+rrwIxZHo,35479,31699
diff --git a/csv/entry_16_3.csv b/csv/entry_16_3.csv
new file mode 100644
index 0000000..a280358
--- /dev/null
+++ b/csv/entry_16_3.csv
@@ -0,0 +1,17 @@
+username,balance_ETH_ETH,balance_USDT_ETH
+a1GaEAaa,11888,41163
+b2lfbBGI,67823,18651
+c3hWlEWZ,18651,2087
+d4ZweYtO,22073,55683
+e5dSwiuY,34897,83296
+f6NneNuP,83296,16881
+g7scHXkp,31699,35479
+h8LzkDun,2087,79731
+i9QlnEYI,30605,11888
+l0kZOFYe,16881,14874
+m1CSRAfD,41163,67823
+n2niJMQY,14874,22073
+o3GIMzKR,10032,10032
+p4MDmNLp,55683,34897
+q5LKzCBl,79731,30605
+r6wIxZHo,35479,31699
diff --git a/csv/entry_16_4.csv b/csv/entry_16_4.csv
new file mode 100644
index 0000000..880e9b3
--- /dev/null
+++ b/csv/entry_16_4.csv
@@ -0,0 +1,17 @@
+username,balance_ETH_ETH,balance_USDT_ETH
+a17aEAaa,11888,41163
+b28fbBGI,67823,18651
+c39WlEWZ,18651,2087
+d40weYtO,22073,55683
+e51SwiuY,34897,83296
+f62neNuP,83296,16881
+g73cHXkp,31699,35479
+h84zkDun,2087,79731
+i95lnEYI,30605,11888
+l06ZOFYe,16881,14874
+m17SRAfD,41163,67823
+n28iJMQY,14874,22073
+o39IMzKR,10032,10032
+p40DmNLp,55683,34897
+q51KzCBl,79731,30605
+r62IxZHo,35479,31699
diff --git a/csv/entry_16_no_overflow.csv b/csv/entry_16_no_overflow.csv
new file mode 100644
index 0000000..f55f626
--- /dev/null
+++ b/csv/entry_16_no_overflow.csv
@@ -0,0 +1,17 @@
+username,balance_ETH_ETH,balance_USDT_ETH
+dxGaEAii,18446744073709551615,0
+MBlfbBGI,0,18446744073709551615
+lAhWlEWZ,0,0
+nuZweYtO,0,0
+gbdSwiuY,0,0
+RZNneNuP,0,0
+YsscHXkp,0,0
+RkLzkDun,0,0
+HlQlnEYI,0,0
+RqkZOFYe,0,0
+NjCSRAfD,0,0
+pHniJMQY,0,0
+dOGIMzKR,0,0
+HfMDmNLp,0,0
+xPLKzCBl,0,0
+AtwIxZHo,0,0
diff --git a/csv/entry_64.csv b/csv/entry_64.csv
new file mode 100644
index 0000000..56b7f39
--- /dev/null
+++ b/csv/entry_64.csv
@@ -0,0 +1,65 @@
+username,balance_ETH_ETH,balance_USDT_ETH
+dxGaEAii,11888,41163
+MBlfbBGI,67823,18651
+lAhWlEWZ,18651,2087
+nuZweYtO,22073,55683
+gbdSwiuY,34897,83296
+RZNneNuP,83296,16881
+YsscHXkp,31699,35479
+RkLzkDun,2087,79731
+HlQlnEYI,30605,11888
+RqkZOFYe,16881,14874
+NjCSRAfD,41163,67823
+pHniJMQY,14874,22073
+dOGIMzKR,10032,10032
+HfMDmNLp,55683,34897
+xPLKzCBl,79731,30605
+AtwIxZHo,35479,31699
+aaGaEAaa,11888,41163
+bblfbBGI,67823,18651
+cchWlEWZ,18651,2087
+ddZweYtO,22073,55683
+eedSwiuY,34897,83296
+ffNneNuP,83296,16881
+ggscHXkp,31699,35479
+hhLzkDun,2087,79731
+iiQlnEYI,30605,11888
+llkZOFYe,16881,14874
+mmCSRAfD,41163,67823
+nnniJMQY,14874,22073
+ooGIMzKR,10032,10032
+ppMDmNLp,55683,34897
+qqLKzCBl,79731,30605
+rrwIxZHo,35479,31699
+a1GaEAaa,11888,41163
+b2lfbBGI,67823,18651
+c3hWlEWZ,18651,2087
+d4ZweYtO,22073,55683
+e5dSwiuY,34897,83296
+f6NneNuP,83296,16881
+g7scHXkp,31699,35479
+h8LzkDun,2087,79731
+i9QlnEYI,30605,11888
+l0kZOFYe,16881,14874
+m1CSRAfD,41163,67823
+n2niJMQY,14874,22073
+o3GIMzKR,10032,10032
+p4MDmNLp,55683,34897
+q5LKzCBl,79731,30605
+r6wIxZHo,35479,31699
+a17aEAaa,11888,41163
+b28fbBGI,67823,18651
+c39WlEWZ,18651,2087
+d40weYtO,22073,55683
+e51SwiuY,34897,83296
+f62neNuP,83296,16881
+g73cHXkp,31699,35479
+h84zkDun,2087,79731
+i95lnEYI,30605,11888
+l06ZOFYe,16881,14874
+m17SRAfD,41163,67823
+n28iJMQY,14874,22073
+o39IMzKR,10032,10032
+p40DmNLp,55683,34897
+q51KzCBl,79731,30605
+r62IxZHo,35479,31699
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..6155cd4
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,20 @@
+version: '3.8'
+
+# CloudSpawner only reads from the first element of lists
+services:
+ mini_tree:
+ image: summadev/summa-aggregation-mini-tree:latest
+ ports:
+ - published: 4000
+ target: 4000
+ deploy:
+ replicas: 2
+ placement:
+ constraints:
+ - node.role == worker
+ networks:
+ - mini_tree
+
+networks:
+ mini_tree:
+ driver: overlay
diff --git a/examples/aggregation_flow.rs b/examples/aggregation_flow.rs
new file mode 100644
index 0000000..7a3ec44
--- /dev/null
+++ b/examples/aggregation_flow.rs
@@ -0,0 +1,106 @@
+#![feature(generic_const_exprs)]
+use axum::{routing::post, Router};
+use std::error::Error;
+use std::net::SocketAddr;
+
+use summa_aggregation::{
+ executor::CloudSpawner, mini_tree_generator::create_mst, orchestrator::Orchestrator,
+};
+use summa_backend::{
+ apis::round::Round,
+ contracts::signer::{AddressInput, SummaSigner},
+ tests::initialize_test_env,
+};
+
+#[tokio::main]
+async fn main() -> Result<(), Box> {
+ // 1. Setup Distributed Environment
+ //
+ // Custodians can use any cloud infrastructure to set up worker nodes.
+ // In this example, instead of spawning worker containers on remote nodes, we will use two local servers running mini-tree services as workers.
+
+ // Spawning Worker_1
+ tokio::spawn(async move {
+ let app = Router::new().route("/", post(create_mst));
+ let addr = SocketAddr::from(([0, 0, 0, 0], 4000));
+ axum::Server::bind(&addr)
+ .serve(app.into_make_service())
+ .await
+ .unwrap();
+ });
+
+ // Spawning Worker_2
+ tokio::spawn(async move {
+ let app = Router::new().route("/", post(create_mst));
+ let addr = SocketAddr::from(([0, 0, 0, 0], 4001));
+ axum::Server::bind(&addr)
+ .serve(app.into_make_service())
+ .await
+ .unwrap();
+ });
+
+ // We assume that custodians, when setting up their distributed environment, will obtain the URLs of worker nodes.
+ // In this example, we use two worker URLs corresponding to the workers spawned earlier.
+ // It is important to ensure that the number of URLs matches the number of executors.
+ let worker_node_urls = vec!["127.0.0.1:4000".to_string(), "127.0.0.1:4001".to_string()];
+
+ // To initiate the Round, a SummaSigner instance and its corresponding SummaContract instance are required.
+ // Here, we initialize the signer with a specified private key and the Summa contract's address.
+ let (anvil, _, _, _, summa_contract) = initialize_test_env(None).await;
+ let signer = SummaSigner::new(
+ "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80",
+ anvil.endpoint().as_str(),
+ AddressInput::Address(summa_contract.address()),
+ )
+ .await?;
+ // Up to this point, the above code logic can be viewed as the Custodian's cloud infrastructure setup process for Summa-Aggregation.
+
+ // 2. Initialize the Round with Aggregation Merkle Sum Tree
+ //
+ // Setting parameters for the aggregation merkle sum tree:
+ //
+ // LEVELS: This defines the number of levels in the aggregation merkle sum tree, including the mini-tree level.
+ const LEVELS: usize = 5;
+ // N_CURRENCIES: Specifies the number of currencies in the entry data.
+ const N_CURRENCIES: usize = 2;
+ // N_BYTES: Determines the maximum total balance allowed for each currency, calculated as 1 << (8 * 14) = 2^112.
+ const N_BYTES: usize = 14;
+ // Note: These parameters should match those in the Summa contract.
+
+ // CloudSpawner does not depend on a `docker-compose.yml` file or a `service_name` for creating workers.
+ // This implies that `service_info` is not necessary. When `service_info` is absent, CloudSpawner creates an Executor solely based on the `worker_node_url`.
+ let spawner = CloudSpawner::new(None, worker_node_urls, 4000);
+ let orchestrator = Orchestrator::::new(
+ Box::new(spawner),
+ vec![
+ "csv/entry_16_1.csv".to_string(),
+ "csv/entry_16_2.csv".to_string(),
+ ],
+ );
+
+ // The number of Executors must match the number of worker_node_urls.
+ let aggregation_merkle_sum_tree = orchestrator.create_aggregation_mst(2).await.unwrap();
+
+ // After obtaining the aggregation merkle sum tree, we proceed to initialize the round.
+ let timestamp = 1u64;
+ let params_path = "examples/hermez-raw-11";
+ let round = Round::::new(
+ &signer,
+ Box::new(aggregation_merkle_sum_tree),
+ params_path,
+ timestamp,
+ )
+ .unwrap();
+
+ // 3. Interact with the Summa Contract and Generate Proof of Inclusion
+ //
+ // Interactions with the Summa contract, such as sending Commitment or AddressOwnership, are similar to those in the `summa-backend` example, particularly 'summa_solvency_flow'.
+ // For detailed information, refer to the example at: https://github.com/summa-dev/summa-solvency/blob/master/backend/examples/summa_solvency_flow.rs
+ //
+ // Here, we demonstrate generating the proof of inclusion for User 0.
+ let inclusion_proof_of_user0 = round.get_proof_of_inclusion(0).unwrap();
+ assert!(!inclusion_proof_of_user0.get_public_inputs().is_empty()); // Check public input counts
+
+ println!("Generated User 0 proof of inclusion");
+ Ok(())
+}
diff --git a/examples/hermez-raw-11 b/examples/hermez-raw-11
new file mode 100644
index 0000000..2205b72
Binary files /dev/null and b/examples/hermez-raw-11 differ
diff --git a/scripts/test_sending_entry.sh b/scripts/test_sending_entry.sh
index 3e13652..034378a 100644
--- a/scripts/test_sending_entry.sh
+++ b/scripts/test_sending_entry.sh
@@ -10,13 +10,38 @@ else
fi
# The curl command with the variable PORT
-curl -X POST http://localhost:$PORT/ \
+curl -X POST http://localhost:$PORT \
+ -vvv \
-H "Content-Type: application/json" \
-d '[
{
"balances": ["11888", "41163"],
"username": "dxGaEAii"
},
+ {
+ "balances": ["67823", "18651"],
+ "username": "MBlfbBGI"
+ },
+ {
+ "balances": ["11888", "41163"],
+ "username": "dxGaEAii"
+ },
+ {
+ "balances": ["67823", "18651"],
+ "username": "MBlfbBGI"
+ },
+ {
+ "balances": ["11888", "41163"],
+ "username": "dxGaEAii"
+ },
+ {
+ "balances": ["67823", "18651"],
+ "username": "MBlfbBGI"
+ },
+ {
+ "balances": ["11888", "41163"],
+ "username": "dxGaEAii"
+ },
{
"balances": ["67823", "18651"],
"username": "MBlfbBGI"
diff --git a/src/aggregation_merkle_sum_tree.rs b/src/aggregation_merkle_sum_tree.rs
index 5eacc4c..0e4e884 100644
--- a/src/aggregation_merkle_sum_tree.rs
+++ b/src/aggregation_merkle_sum_tree.rs
@@ -1,29 +1,32 @@
use halo2_proofs::halo2curves::bn256::Fr as Fp;
-use num_bigint::BigUint;
-use summa_backend::merkle_sum_tree::utils::{build_merkle_tree_from_leaves, fp_to_big_uint};
-use summa_backend::merkle_sum_tree::{Entry, MerkleProof, MerkleSumTree, Node, Tree};
+use std::error::Error;
+use summa_backend::merkle_sum_tree::utils::build_merkle_tree_from_leaves;
+use summa_backend::merkle_sum_tree::{
+ Cryptocurrency, Entry, MerkleProof, MerkleSumTree, Node, Tree,
+};
/// Aggregation Merkle Sum Tree Data Structure.
///
-/// Starting from a set of "mini" Merkle Sum Trees of equal depth, N_ASSETS and N_BYTES, the Aggregation Merkle Sum Tree inherits the properties of a Merkle Sum Tree and adds the following:
+/// Starting from a set of "mini" Merkle Sum Tree of equal depth, N_CURRENCIES and N_BYTES, the Aggregation Merkle Sum Tree inherits the properties of a Merkle Sum Tree and adds the following:
/// * Each Leaf of the Aggregation Merkle Sum Tree is the root of a "mini" Merkle Sum Tree made of `hash` and `balances`
///
/// # Type Parameters
///
-/// * `N_ASSETS`: The number of assets for each user account
+/// * `N_CURRENCIES`: The number of assets for each user account
/// * `N_BYTES`: Range in which each node balance should lie
#[derive(Debug, Clone)]
-pub struct AggregationMerkleSumTree {
- root: Node,
- nodes: Vec>>,
+pub struct AggregationMerkleSumTree {
+ root: Node,
+ nodes: Vec>>,
depth: usize,
- mini_trees: Vec>,
+ cryptocurrencies: Vec,
+ mini_trees: Vec>,
}
-impl Tree
- for AggregationMerkleSumTree
+impl Tree
+ for AggregationMerkleSumTree
{
- fn root(&self) -> &Node {
+ fn root(&self) -> &Node {
&self.root
}
@@ -31,15 +34,15 @@ impl Tree
&self.depth
}
- fn leaves(&self) -> &[Node] {
- &self.nodes[0]
+ fn nodes(&self) -> &[Vec>] {
+ &self.nodes
}
- fn nodes(&self) -> &[Vec>] {
- &self.nodes
+ fn cryptocurrencies(&self) -> &[Cryptocurrency] {
+ &self.cryptocurrencies
}
- fn get_entry(&self, user_index: usize) -> &Entry {
+ fn get_entry(&self, user_index: usize) -> &Entry {
let (mini_tree_index, entry_index) = self.get_entry_location(user_index);
// Retrieve the mini tree
@@ -49,42 +52,61 @@ impl Tree
mini_tree.get_entry(entry_index)
}
- fn generate_proof(&self, index: usize) -> Result, &'static str> {
+ fn generate_proof(
+ &self,
+ index: usize,
+ ) -> Result, Box>
+ where
+ [usize; N_CURRENCIES + 1]: Sized,
+ [usize; N_CURRENCIES + 2]: Sized,
+ {
let (mini_tree_index, entry_index) = self.get_entry_location(index);
// Retrieve the mini tree
let mini_tree = &self.mini_trees[mini_tree_index];
+ // Retrieve sibling mini tree
+ let sibling_mini_tree_index = if mini_tree_index % 2 == 0 {
+ mini_tree_index + 1
+ } else {
+ mini_tree_index - 1
+ };
+ let sibling_mini_tree = &self.mini_trees[sibling_mini_tree_index];
+
// Build the partial proof, namely from the leaf to the root of the mini tree
let mut partial_proof = mini_tree.generate_proof(entry_index)?;
+ let mut sibling_middle_node_hash_preimages = Vec::new();
+
+ // Retrieve sibling mini tree root hash preimage
+ let sibling_mini_tree_node_preimage = sibling_mini_tree
+ .get_middle_node_hash_preimage(*sibling_mini_tree.depth(), 0)
+ .unwrap();
+
+ sibling_middle_node_hash_preimages.push(sibling_mini_tree_node_preimage);
// Build the rest of the proof (top_proof), namely from the root of the mini tree to the root of the aggregation tree
let mut current_index = mini_tree_index;
-
- let mut sibling_hashes = vec![Fp::from(0); self.depth];
- let mut sibling_sums = vec![[Fp::from(0); N_ASSETS]; self.depth];
let mut path_indices = vec![Fp::from(0); self.depth];
+ #[allow(clippy::needless_range_loop)]
for level in 0..self.depth {
let position = current_index % 2;
- let level_start_index = current_index - position;
- let level_end_index = level_start_index + 2;
-
path_indices[level] = Fp::from(position as u64);
- for i in level_start_index..level_end_index {
- if i != current_index {
- sibling_hashes[level] = self.nodes[level][i].hash;
- sibling_sums[level] = self.nodes[level][i].balances;
- }
+ let sibling_index = current_index - position + (1 - position);
+ if sibling_index < self.nodes[level].len() && level != 0 {
+ // Fetch hash preimage for sibling middle nodes
+ let sibling_node_preimage =
+ self.get_middle_node_hash_preimage(level, sibling_index)?;
+ sibling_middle_node_hash_preimages.push(sibling_node_preimage);
}
current_index /= 2;
}
- // append the top_proof to the partial_proof
- partial_proof.sibling_hashes.extend(sibling_hashes);
- partial_proof.sibling_sums.extend(sibling_sums);
partial_proof.path_indices.extend(path_indices);
+ partial_proof
+ .sibling_middle_node_hash_preimages
+ .extend(sibling_middle_node_hash_preimages);
// replace the root of the partial proof with the root of the aggregation tree
partial_proof.root = self.root.clone();
@@ -93,40 +115,37 @@ impl Tree
}
}
-impl AggregationMerkleSumTree {
- /// Builds a AggregationMerkleSumTree from a set of mini MerkleSumTrees
- /// The leaves of the AggregationMerkleSumTree are the roots of the mini MerkleSumTrees
+impl
+ AggregationMerkleSumTree
+{
+ /// Builds a AggregationMerkleSumTree from a set of mini MerkleSumTree
+ /// The leaves of the AggregationMerkleSumTree are the roots of the mini MerkleSumTree
pub fn new(
- mini_trees: Vec>,
- ) -> Result>
+ mini_trees: Vec>,
+ cryptocurrencies: Vec,
+ ) -> Result, Box>
where
- [usize; N_ASSETS + 1]: Sized,
- [usize; 2 * (1 + N_ASSETS)]: Sized,
+ [usize; N_CURRENCIES + 1]: Sized,
+ [usize; N_CURRENCIES + 2]: Sized,
{
+ if mini_trees.is_empty() {
+ return Err("Empty mini tree inputs".into());
+ }
+
// assert that all mini trees have the same depth
let depth = mini_trees[0].depth();
assert!(mini_trees.iter().all(|x| x.depth() == depth));
- Self::build_tree(mini_trees)
- }
-
- fn build_tree(
- mini_trees: Vec>,
- ) -> Result, Box>
- where
- [usize; N_ASSETS + 1]: Sized,
- [usize; 2 * (1 + N_ASSETS)]: Sized,
- {
// extract all the roots of the mini trees
let roots = mini_trees
.iter()
.map(|x| x.root().clone())
- .collect::>>();
+ .collect::>>();
let depth = (roots.len() as f64).log2().ceil() as usize;
// Calculate the accumulated balances for each asset
- let mut balances_acc: Vec = vec![Fp::from(0); N_ASSETS];
+ let mut balances_acc: Vec = vec![Fp::from(0); N_CURRENCIES];
for root in &roots {
for (i, balance) in root.balances.iter().enumerate() {
@@ -134,19 +153,6 @@ impl AggregationMerkleSumTree= BigUint::from(2_usize).pow(8 * N_BYTES as u32) {
- return Err(
- "Accumulated balance is not in the expected range, proof generation will fail!"
- .into(),
- );
- }
- }
-
let mut nodes = vec![];
let root = build_merkle_tree_from_leaves(&roots, depth, &mut nodes)?;
@@ -154,11 +160,12 @@ impl AggregationMerkleSumTree &MerkleSumTree {
+ pub fn mini_tree(&self, tree_index: usize) -> &MerkleSumTree {
&self.mini_trees[tree_index]
}
@@ -178,27 +185,26 @@ impl AggregationMerkleSumTree::new("src/data/entry_16_1.csv").unwrap();
+ MerkleSumTree::::from_csv("csv/entry_16_1.csv").unwrap();
let mini_tree_2 =
- MerkleSumTree::::new("src/data/entry_16_2.csv").unwrap();
+ MerkleSumTree::::from_csv("csv/entry_16_2.csv").unwrap();
- let aggregation_mst = AggregationMerkleSumTree::::new(vec![
- mini_tree_1.clone(),
- mini_tree_2.clone(),
- ])
+ let aggregation_mst = AggregationMerkleSumTree::::new(
+ vec![mini_tree_1.clone(), mini_tree_2.clone()],
+ mini_tree_1.cryptocurrencies().to_owned().to_vec(),
+ )
.unwrap();
// get root
@@ -235,21 +241,51 @@ mod test {
assert!(aggregation_mst.verify_proof(&proof));
}
+ #[test]
+ fn test_aggregation_mst_compare_mst_result() {
+ // create new mini merkle sum tree
+ let mut mini_trees = Vec::new();
+ for i in 1..=4 {
+ let mini_tree = MerkleSumTree::::from_csv(&format!(
+ "csv/entry_16_{}.csv",
+ i
+ ))
+ .unwrap();
+ mini_trees.push(mini_tree);
+ }
+ let cryptocurrencies = mini_trees[0].cryptocurrencies().to_owned().to_vec();
+ let aggregation_mst =
+ AggregationMerkleSumTree::::new(mini_trees, cryptocurrencies)
+ .unwrap();
+
+ let aggregation_mst_root = aggregation_mst.root();
+
+ // The entry_64.csv file is the aggregation of entry_16_1, entry_16_2, entry_16_3, entry_16_4
+ let single_merkle_sum_tree =
+ MerkleSumTree::::from_csv("csv/entry_64.csv").unwrap();
+
+ assert_eq!(
+ aggregation_mst_root.hash,
+ single_merkle_sum_tree.root().hash
+ );
+ }
+
#[test]
fn test_aggregation_mst_overflow() {
- // create new mini merkle sum trees. The accumulated balance for each mini tree is in the expected range
- // note that the accumulated balance of the tree generated from entry_16_3 is just in the expected range for 1 unit
+ // create new mini merkle sum tree. The accumulated balance for each mini tree is in the expected range
+ // note that the accumulated balance of the tree generated from entry_16_4 is just in the expected range for 1 unit
let merkle_sum_tree_1 =
- MerkleSumTree::::new("src/data/entry_16_1.csv").unwrap();
+ MerkleSumTree::::from_csv("csv/entry_16.csv").unwrap();
let merkle_sum_tree_2 =
- MerkleSumTree::::new("src/data/entry_16_.csv").unwrap();
+ MerkleSumTree::::from_csv("csv/entry_16_no_overflow.csv")
+ .unwrap();
// When creating the aggregation merkle sum tree, the accumulated balance of the two mini trees is not in the expected range, an error is thrown
- let result = AggregationMerkleSumTree::::new(vec![
- merkle_sum_tree_1,
- merkle_sum_tree_2.clone(),
- ]);
+ let result = AggregationMerkleSumTree::::new(
+ vec![merkle_sum_tree_1, merkle_sum_tree_2.clone()],
+ merkle_sum_tree_2.cryptocurrencies().to_vec(),
+ );
if let Err(e) = result {
assert_eq!(
diff --git a/src/data/entry_16_1.csv b/src/data/entry_16_1.csv
deleted file mode 100644
index 228b1db..0000000
--- a/src/data/entry_16_1.csv
+++ /dev/null
@@ -1,17 +0,0 @@
-username;balances
-dxGaEAii;11888,41163
-MBlfbBGI;67823,18651
-lAhWlEWZ;18651,2087
-nuZweYtO;22073,55683
-gbdSwiuY;34897,83296
-RZNneNuP;83296,16881
-YsscHXkp;31699,35479
-RkLzkDun;2087,79731
-HlQlnEYI;30605,11888
-RqkZOFYe;16881,14874
-NjCSRAfD;41163,67823
-pHniJMQY;14874,22073
-dOGIMzKR;10032,10032
-HfMDmNLp;55683,34897
-xPLKzCBl;79731,30605
-AtwIxZHo;35479,31699
diff --git a/src/data/entry_16_2.csv b/src/data/entry_16_2.csv
deleted file mode 100644
index b2183a2..0000000
--- a/src/data/entry_16_2.csv
+++ /dev/null
@@ -1,17 +0,0 @@
-username;balances
-aaGaEAaa;11888,41163
-bblfbBGI;67823,18651
-cchWlEWZ;18651,2087
-ddZweYtO;22073,55683
-eedSwiuY;34897,83296
-ffNneNuP;83296,16881
-ggscHXkp;31699,35479
-hhLzkDun;2087,79731
-iiQlnEYI;30605,11888
-llkZOFYe;16881,14874
-mmCSRAfD;41163,67823
-nnniJMQY;14874,22073
-ooGIMzKR;10032,10032
-ppMDmNLp;55683,34897
-qqLKzCBl;79731,30605
-rrwIxZHo;35479,31699
\ No newline at end of file
diff --git a/src/data/entry_16_3.csv b/src/data/entry_16_3.csv
deleted file mode 100644
index 49dc935..0000000
--- a/src/data/entry_16_3.csv
+++ /dev/null
@@ -1,17 +0,0 @@
-username;balances
-dxGaEAii;18446744073709551615,0
-MBlfbBGI;0,18446744073709551615
-lAhWlEWZ;0,0
-nuZweYtO;0,0
-gbdSwiuY;0,0
-RZNneNuP;0,0
-YsscHXkp;0,0
-RkLzkDun;0,0
-HlQlnEYI;0,0
-RqkZOFYe;0,0
-NjCSRAfD;0,0
-pHniJMQY;0,0
-dOGIMzKR;0,0
-HfMDmNLp;0,0
-xPLKzCBl;0,0
-AtwIxZHo;0,0
diff --git a/src/executor/cloud_spawner.rs b/src/executor/cloud_spawner.rs
new file mode 100644
index 0000000..d98d975
--- /dev/null
+++ b/src/executor/cloud_spawner.rs
@@ -0,0 +1,188 @@
+use std::error::Error;
+use std::sync::{
+ atomic::{AtomicUsize, Ordering},
+ Arc,
+};
+use std::{future::Future, pin::Pin};
+
+use bollard::network::ListNetworksOptions;
+use bollard::service::{ListServicesOptions, UpdateServiceOptions};
+use tokio::sync::oneshot;
+
+use crate::executor::utils::get_specs_from_compose;
+use crate::executor::{Executor, ExecutorSpawner};
+
+pub struct CloudSpawner {
+ service_info: Option<(String, String)>,
+ worker_counter: Arc,
+ worker_node_url: Vec,
+ default_port: i64,
+}
+
+/// CloudSpawner
+///
+/// Designed for cloud-based resources and Docker Swarm, CloudSpawner is optimized for scalability and high availability.
+/// While functioning similarly to LocalSpawner, it extends its capabilities by initializing workers on remote machines, making it particularly suitable for Swarm network setups.
+///
+/// CloudSpawner can be utilized in two ways:
+///
+/// - Without `service_info`, CloudSpawner does not directly manage Worker instances.
+/// In this mode, it does not control or interact with the Docker API for worker management.
+///
+/// - With `service_info`, CloudSpawner requires a `docker-compose` file. When provided with `service_info`,
+/// it manages Docker services and networks, enabling dynamic scaling and orchestration of workers.
+impl CloudSpawner {
+ pub fn new(
+ service_info: Option<(String, String)>, // If the user wants to use docker-compose.yml for docker swarm
+ worker_node_url: Vec,
+ default_port: i64,
+ ) -> Self {
+ assert!(!worker_node_url.is_empty(), "Worker node url is empty");
+ CloudSpawner {
+ service_info,
+ worker_counter: Arc::new(AtomicUsize::new(0)),
+ worker_node_url,
+ default_port,
+ }
+ }
+
+ async fn create_service(service_name: &str, compose_path: &str) -> Result<(), Box> {
+ let docker = bollard::Docker::connect_with_local_defaults().unwrap();
+
+ // Retrieve network options and service spec from docker-compose.yml
+ let (network_options, service_spec) =
+ get_specs_from_compose(service_name, compose_path).unwrap();
+
+ // Check network exist then create if not exist
+ let list_network = docker
+ .list_networks(None::>)
+ .await?;
+
+ let mut found_target_network = false;
+ list_network.iter().for_each(|network| {
+ if service_name == *network.name.as_ref().unwrap() {
+ found_target_network = true;
+ }
+ });
+
+ if !found_target_network {
+ match docker.create_network(network_options).await {
+ Ok(result) => println!("Network created: {:?}", result),
+ Err(error) => eprintln!("Error creating network: {}", error),
+ }
+ }
+
+ // Checking service exist then create if not exist
+ let services = docker
+ .list_services(None::>)
+ .await?;
+
+ let mut found_exist_service = false;
+ let mut service_version = 0;
+
+ services.iter().for_each(|service| {
+ let retrieved_service_spec = service
+ .spec
+ .as_ref()
+ .ok_or::>("No spec in service on Docker".into())
+ .unwrap();
+ let retrieved_service_name = retrieved_service_spec
+ .name
+ .as_ref()
+ .ok_or::>("No name in service.spec on Docker".into())
+ .unwrap();
+
+ if service_name == *retrieved_service_name {
+ found_exist_service = true;
+
+ // Update service version
+ let retrieved_service_version = service
+ .version
+ .as_ref()
+ .ok_or::>("No version in service on Docker".into())
+ .unwrap();
+ if let Some(exist_version) = retrieved_service_version.index {
+ service_version = exist_version + 1;
+ }
+ }
+ });
+
+ if !found_exist_service {
+ docker.create_service(service_spec, None).await?;
+ println!("Service {:?} created", service_name);
+ } else {
+ println!(
+ "Service {:?} already exists, Will use the service",
+ service_name
+ );
+ let update_service_options = UpdateServiceOptions {
+ version: service_version,
+ ..Default::default()
+ };
+ let update_response = docker
+ .update_service(service_name, service_spec, update_service_options, None)
+ .await?;
+ update_response.warnings.iter().for_each(|warning| {
+ println!("warning: {:?}", warning);
+ });
+ };
+ Ok(())
+ }
+}
+
+impl ExecutorSpawner for CloudSpawner {
+ fn spawn_executor(&self) -> Pin + Send>> {
+ let (tx, rx) = oneshot::channel();
+
+ let current_worker_counter = self.worker_counter.load(Ordering::SeqCst);
+
+ // Create service if the worker counter is 0, which means no executor is spawned.
+ if current_worker_counter == 0 && self.service_info.is_some() {
+ let (service_name, compose_path) = self.service_info.clone().unwrap();
+ tokio::spawn(async move {
+ if let Err(e) = CloudSpawner::create_service(&service_name, &compose_path).await {
+ eprintln!("Error creating service: {}", e);
+ } else {
+ // Sleep for 5 seconds to wait for the service to be ready
+ std::thread::sleep(std::time::Duration::from_secs(5));
+ let _ = tx.send(service_name.clone());
+ println!("Service {} created", service_name);
+ }
+ });
+ }
+
+ // The traffic is routed to the service by the swarm manager.
+ // So, all executors can use the same exposed endpoint for distributing tasks to multiple workers.
+ let port = self.default_port;
+ let node_url = self.worker_node_url[current_worker_counter].clone();
+ let worker_counter = self.worker_counter.clone();
+ Box::pin(async move {
+ if worker_counter.load(Ordering::SeqCst) == 0 {
+ let _ = rx.await;
+ }
+ // Check if the URL already contains a port
+ let has_port = node_url.split(':').last().unwrap().parse::().is_ok();
+
+ // Append the port if it's not there
+ let final_url = if has_port {
+ node_url.clone()
+ } else {
+ format!("{}:{}", node_url, port)
+ };
+ worker_counter.fetch_add(1, Ordering::SeqCst);
+ Executor::new(format!("http://{}", final_url), None)
+ })
+ }
+
+ fn terminate_executors(&self) -> Pin + Send>> {
+ let service_info = self.service_info.clone();
+ Box::pin(async move {
+ if let Some((service_name, _)) = service_info.clone() {
+ let docker = bollard::Docker::connect_with_local_defaults().unwrap();
+
+ docker.delete_service(&service_name).await.unwrap();
+ docker.remove_network(&service_name).await.unwrap();
+ }
+ })
+ }
+}
diff --git a/src/executor/local_spawner.rs b/src/executor/local_spawner.rs
new file mode 100644
index 0000000..0ace6fe
--- /dev/null
+++ b/src/executor/local_spawner.rs
@@ -0,0 +1,214 @@
+use bollard::{
+ container::{Config, CreateContainerOptions, RemoveContainerOptions, StartContainerOptions},
+ models::{HostConfig, PortBinding},
+ service::ContainerInspectResponse,
+ Docker,
+};
+use std::{
+ collections::HashMap,
+ default::Default,
+ env,
+ error::Error,
+ future::Future,
+ net::{SocketAddr, TcpListener, IpAddr},
+ pin::Pin,
+ sync::atomic::{AtomicUsize, Ordering}, str::FromStr,
+};
+use tokio;
+use tokio::sync::oneshot;
+
+use crate::executor::{Executor, ExecutorSpawner};
+
+/// LocalSpawner
+///
+/// The LocalSpawner is for use cases closer to actual deployment. It enables the initialization of Executors
+/// and Workers within a local Docker environment. This spawner is ideal for development and testing phases,
+/// where simplicity and direct control over the containers are beneficial.
+pub struct LocalSpawner {
+ docker: Docker,
+ worker_counter: AtomicUsize,
+ image_name: String,
+ container_name: String,
+}
+
+impl LocalSpawner {
+ pub fn new(image_name: String, container_name: String) -> Self {
+ let docker = match env::var("DOCKER_HOST") {
+ // Read `DOCKER_HOST` environment variable as default
+ Ok(host) => Docker::connect_with_http_defaults()
+ .unwrap_or_else(|_| panic!("Failed to connect to {} for using Docker", host)),
+ _ => Docker::connect_with_local_defaults()
+ .unwrap_or_else(|_| panic!("Failed to connect to Docker")),
+ };
+
+ LocalSpawner {
+ docker,
+ worker_counter: AtomicUsize::new(0),
+ image_name,
+ container_name,
+ }
+ }
+
+ fn find_unused_port() -> Result {
+ // Bind to address with port 0.
+ // The OS will assign an available ephemeral port.
+ let listener = TcpListener::bind("127.0.0.1:0")?;
+
+ // Retrieve the assigned port.
+ match listener.local_addr() {
+ Ok(SocketAddr::V4(addr)) => Ok(addr.port()),
+ Ok(SocketAddr::V6(addr)) => Ok(addr.port()),
+ Err(e) => Err(e),
+ }
+ }
+
+ // Create a Docker instance connected to the local Docker daemon.
+ pub async fn create_container(
+ docker: Docker,
+ image_name: String,
+ container_name: String,
+ id: usize,
+ desirable_port: u16,
+ ) -> Result> {
+ let container_name = format!("{}_{}", container_name, id);
+
+ // Define port mapping (container_port -> host_port)
+ let port_bindings = {
+ let mut port_bindings = HashMap::new();
+ port_bindings.insert(
+ "4000/tcp".to_string(), // Container port
+ Some(vec![PortBinding {
+ host_ip: Some(IpAddr::from_str("127.0.0.1").unwrap().to_string()), // Host IP
+ host_port: Some(desirable_port.to_string()), // Host port
+ }]),
+ );
+ port_bindings
+ };
+
+ let config = Config {
+ image: Some(image_name),
+ exposed_ports: Some(HashMap::from([("4000/tcp".to_string(), HashMap::<(), ()>::new())])), // Expose the container port
+ host_config: Some(HostConfig {
+ port_bindings: Some(port_bindings),
+ ..Default::default()
+ }),
+ ..Default::default()
+ };
+
+ // Create the container.
+ let create_container_options = CreateContainerOptions {
+ name: container_name.clone(),
+ platform: None,
+ };
+
+ docker
+ .create_container(Some(create_container_options), config.clone())
+ .await?;
+
+ docker
+ .start_container(
+ &container_name.clone(),
+ None::>,
+ )
+ .await?;
+
+ let container_info: ContainerInspectResponse =
+ docker.inspect_container(&container_name, None).await?;
+
+ Ok(container_info)
+ }
+}
+
+impl ExecutorSpawner for LocalSpawner {
+ fn spawn_executor(&self) -> Pin + Send>> {
+ // Using channel that onetime use, `oneshot`, to send container information
+ let (tx, rx) = oneshot::channel();
+
+ // These variables have to be cloned because they are moved into the async block
+ let docker_clone = self.docker.clone();
+ let image_name = self.image_name.clone();
+ let container_name = self.container_name.clone();
+ let id = self.worker_counter.fetch_add(1, Ordering::SeqCst);
+ tokio::spawn(async move {
+ let desirable_port = LocalSpawner::find_unused_port().unwrap_or_default();
+ let res = LocalSpawner::create_container(
+ docker_clone,
+ image_name,
+ container_name,
+ id,
+ desirable_port,
+ )
+ .await;
+ match res {
+ Ok(container_info) => {
+ // the desirable_port is the port that is exposed to the host
+ let _ = tx.send((desirable_port, container_info));
+ }
+ Err(e) => {
+ eprintln!("Error creating container: {}", e);
+ }
+ }
+ });
+
+ // Return a Future that resolves to Executor
+ Box::pin(async move {
+ // the container_info also has the exposed port in its 'host_port' field, but it is clumsy to use it
+ let (exposed_port, container_info) = rx.await.expect("Failed to receive worker URL");
+ let worker_url = format!(
+ "http://127.0.0.1:{}", // This port is exposed to the host
+ exposed_port
+ );
+ Executor::new(worker_url, container_info.name)
+ })
+ }
+
+ fn terminate_executors(&self) -> Pin + Send>> {
+ let docker_clone = self.docker.clone();
+
+ let container_name = self.container_name.clone();
+ let worker_counter = self.worker_counter.load(Ordering::SeqCst);
+ Box::pin(async move {
+ // Remove the container
+ let remove_options = RemoveContainerOptions {
+ force: true, // Force the removal of the container
+ ..Default::default()
+ };
+
+ for i in 0..worker_counter {
+ let container_name_with_id = format!("{}_{}", container_name, i);
+ if let Err(e) = docker_clone
+ .remove_container(&container_name_with_id, Some(remove_options))
+ .await
+ {
+ eprintln!("Error removing container: {}", e);
+ }
+ }
+ })
+ }
+}
+
+#[cfg(feature = "docker")]
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[tokio::test]
+ async fn test_executor_spawner() {
+ let spawner = LocalSpawner::new(
+ "summadev/summa-aggregation-mini-tree:latest".to_string(),
+ "executor_test".to_string(),
+ );
+
+ // Spawn 2 executors
+ let executor_1 = spawner.spawn_executor().await;
+ let executor_2 = spawner.spawn_executor().await;
+
+ // Sleep 2 seconds for the container to be ready
+ tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
+ assert!(!executor_1.get_url().is_empty());
+ assert!(!executor_2.get_url().is_empty());
+
+ // Teardown
+ spawner.terminate_executors().await;
+ }
+}
diff --git a/src/executor/mock_spawner.rs b/src/executor/mock_spawner.rs
new file mode 100644
index 0000000..d94f0a8
--- /dev/null
+++ b/src/executor/mock_spawner.rs
@@ -0,0 +1,114 @@
+use axum::{routing::post, Router};
+use std::{
+ future::Future,
+ net::SocketAddr,
+ pin::Pin,
+ str::FromStr,
+ sync::atomic::{AtomicUsize, Ordering},
+};
+use tokio;
+use tokio::sync::oneshot;
+
+use crate::executor::{Executor, ExecutorSpawner};
+use crate::mini_tree_generator::create_mst;
+
+/// MockSpawner
+///
+/// Primarily used for testing purposes, the MockSpawner initializes Executors suitable for various test scenarios,
+/// including negative test cases. It runs the `mini-tree-server` locally, allowing for a controlled testing environment.
+pub struct MockSpawner {
+ urls: Option>,
+ worker_counter: AtomicUsize,
+}
+
+impl MockSpawner {
+ pub fn new(urls: Option>) -> Self {
+ MockSpawner {
+ urls,
+ worker_counter: AtomicUsize::new(0),
+ }
+ }
+}
+
+impl ExecutorSpawner for MockSpawner {
+ fn spawn_executor(&self) -> Pin + Send>> {
+ let (tx, rx) = oneshot::channel();
+
+ let id = self.worker_counter.fetch_add(1, Ordering::SeqCst);
+
+ // If urls is not None, use the urls to spawn executors
+ if self.urls.is_some() && self.urls.as_ref().unwrap().len() > id {
+ let url = self.urls.as_ref().unwrap()[id].clone();
+ let _ = tx.send(SocketAddr::from_str(&url).unwrap());
+
+ return Box::pin(async move {
+ let url = rx.await.expect("Failed to receive worker URL");
+ let worker_url = format!("http://{}", url);
+ Executor::new(worker_url, None)
+ });
+ }
+
+ // if there is no url or already used all urls, spawn a new executor
+ tokio::spawn(async move {
+ let app = Router::new().route("/", post(create_mst));
+
+ // Bind to port 0 to let the OS choose a random port
+ let addr = SocketAddr::from(([127, 0, 0, 1], 0));
+
+ let server = axum::Server::bind(&addr).serve(app.into_make_service());
+
+ // Send worker url to rx
+ let _ = tx.send(server.local_addr());
+
+ // Start server
+ server.await.unwrap();
+ });
+
+ // Return a Future that resolves to Executor
+ Box::pin(async move {
+ // load current worker counter
+ let url = rx.await.expect("Failed to receive worker URL");
+ let worker_url = format!("http://{}", url);
+ Executor::new(worker_url, None)
+ })
+ }
+
+ fn terminate_executors(&self) -> Pin + Send>> {
+ Box::pin(async move {
+ // Nothing to do if no executors are running
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[tokio::test]
+ async fn test_new_urls() {
+ let spawner = MockSpawner::new(None);
+
+ // Spawn 2 executors
+ let executor_1 = spawner.spawn_executor().await;
+ let executor_2 = spawner.spawn_executor().await;
+
+ // Sleep 2 seconds for the container to be ready
+ tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
+ assert!(!executor_1.get_url().is_empty());
+ assert!(!executor_2.get_url().is_empty());
+ }
+
+ #[tokio::test]
+ async fn test_with_given_url() {
+ let urls = vec!["192.168.0.1:65535".to_string()];
+ let spawner = MockSpawner::new(Some(urls));
+
+ // Spawn 2 executors
+ let executor_1 = spawner.spawn_executor().await;
+ let executor_2 = spawner.spawn_executor().await;
+
+ tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
+ assert_eq!(executor_1.get_url(), "http://192.168.0.1:65535");
+ assert_ne!(executor_2.get_url(), "http://192.168.0.1:65535");
+ }
+}
diff --git a/src/executor/mod.rs b/src/executor/mod.rs
new file mode 100644
index 0000000..e0e7870
--- /dev/null
+++ b/src/executor/mod.rs
@@ -0,0 +1,87 @@
+mod cloud_spawner;
+mod local_spawner;
+mod mock_spawner;
+mod spawner;
+mod test;
+mod utils;
+
+pub use cloud_spawner::CloudSpawner;
+pub use local_spawner::LocalSpawner;
+pub use mock_spawner::MockSpawner;
+pub use spawner::ExecutorSpawner;
+
+use reqwest::Client;
+use std::error::Error;
+use tokio::time::{sleep, Duration};
+
+use crate::json_mst::{JsonEntry, JsonMerkleSumTree};
+use summa_backend::merkle_sum_tree::MerkleSumTree;
+
+/// Executor role and functionality.
+/// Acts as an intermediary between the Orchestrator and Workers, facilitating the data processing workflow.
+/// Each Executor operates in a one-to-one relationship with a Worker, processing entry data into `mini-tree`.
+///
+/// Key aspects of the Executor's role include:
+/// - **Spawning and Connection**: Executors connect with Workers to execute tasks, enhancing the system's scalability.
+/// - **Data Handling and Task Distribution**: Executors manage and distribute data entries, ensuring smooth workflow
+/// - **Communication Bridge**: They facilitate communication within the data pipeline, relaying 'mini-tree' from Workers to the Orchestrator.
+///
+/// Executors are dynamically spawned and connected to Workers for task execution.
+#[derive(Clone)]
+pub struct Executor {
+ client: Client,
+ url: String,
+ id: Option,
+}
+
+impl Executor {
+ pub fn new(url: String, id: Option) -> Self {
+ Executor {
+ client: Client::new(),
+ url,
+ id,
+ }
+ }
+
+ pub fn get_url(&self) -> String {
+ self.url.clone()
+ }
+
+ pub fn get_name(&self) -> Option {
+ self.id.clone()
+ }
+
+ pub async fn generate_tree(
+ &self,
+ json_entries: Vec,
+ ) -> Result, Box>
+ where
+ [usize; N_CURRENCIES + 1]: Sized,
+ [usize; N_CURRENCIES + 2]: Sized,
+ {
+ const MAX_RETRIES: u32 = 5;
+ const RETRY_DELAY: Duration = Duration::from_secs(1);
+
+ let mut attempts = 0;
+ loop {
+ attempts += 1;
+ let response = self.client.post(&self.url).json(&json_entries).send().await;
+
+ match response {
+ Ok(response) => {
+ let json_tree = response
+ .json::()
+ .await
+ .map_err(|err| Box::new(err) as Box)?;
+
+ let tree = json_tree.to_mst().unwrap();
+ return Ok(tree);
+ }
+ Err(_err) if attempts < MAX_RETRIES => {
+ sleep(RETRY_DELAY).await;
+ }
+ Err(err) => return Err(Box::new(err) as Box),
+ }
+ }
+ }
+}
diff --git a/src/executor/spawner.rs b/src/executor/spawner.rs
new file mode 100644
index 0000000..64d5939
--- /dev/null
+++ b/src/executor/spawner.rs
@@ -0,0 +1,70 @@
+use std::{future::Future, pin::Pin};
+
+use crate::executor::Executor;
+
+/// ExecutorSpawner responsibility and types.
+///
+/// Responsible for initializing and terminating Executors, serving as a management point for creating Executor instances and Workers.
+///
+/// Types include:
+/// - MockSpawner: For testing, runs `mini-tree-server` locally.
+/// - LocalSpawner: Initializes Executors and Workers in local Docker environments.
+/// - CloudSpawner: Optimized for cloud resources and Docker Swarm, manages containers as services for scalability.
+///
+/// Note: ExecutorSpawner is a trait with key methods `spawn_executor` and `terminate_executor`.
+///
+pub trait ExecutorSpawner {
+ /// Spawns an executor asynchronously.
+ //
+ /// This method initializes an Executor and returns a Future that resolves to the Executor.
+ ///
+ /// To achieve this asynchronously (outside of an async trait function), we use a one-time channel ('oneshot') to deliver the variables to the Future.
+ ///
+ /// Internally, it performs the following codelines:
+ ///
+ /// 1. Uses a 'oneshot' channel for sending the variables from the spawned async task.
+ /// ```ignore
+ /// let (tx, rx) = oneshot::channel();
+ /// ```
+ /// 2. Clones necessary variables (url, name and so on) to move them into the async block.
+ /// ```ignore
+ /// let url = self.url.clone();
+ /// ```
+ /// 3. Spawns an asynchronous task (`tokio::spawn`) that asynchronously creates a worker and sends back its information.
+ /// ```ignore
+ /// tokio::spawn(async move {
+ /// if let Ok(worker_info) =
+ /// Spawner::create_worker(url).await
+ /// {
+ /// let _ = tx.send(worker_info);
+ /// }
+ /// });
+ /// Note that the "create_worker" is typically declared in the "Spawner" struct that has the "ExecutorSpawner" trait.
+ /// 4. Returns a Future that, upon completion, provides an Executor connected to the newly spawned worker.
+ /// ```ignore
+ /// Box::pin(async move {
+ /// let url = rx.await.expect("Failed to receive worker URL");
+ /// Executor::new(url, None);
+ /// });
+ /// ```
+ ///
+ ///
+ // Returns:
+ // - "Pin + Send>>": A Future that, when awaited, yields an Executor instance and spawns a worker.
+ fn spawn_executor(&self) -> Pin + Send>>;
+
+ /// Terminates all spawned executors (and/or workers) asynchronously.
+ ///
+ /// This method is responsible for gracefully shutting down all active executors (and/or workers) by calling
+ /// To do this, the "Spawner" may need some fields storing access points to the workers, which are spawned with the executors.
+ /// To deliver variables to Future results, use a channel like the pattern in 'spawn_executor'.
+ ///
+ /// The termination process typically involves:
+ /// - Iterating through all active Executors and workers.
+ /// - Invoking some kind of 'shutdown' on each executor and worker to initiate their shutdown.
+ /// - Awaiting the completion of all shutdown operations.
+ ///
+ // Returns:
+ // - "Pin + Send>>": A Future that, when awaited, indicates that all executors (and/or workers) have been terminated.
+ fn terminate_executors(&self) -> Pin + Send>>;
+}
diff --git a/src/executor/test.rs b/src/executor/test.rs
new file mode 100644
index 0000000..e415ff2
--- /dev/null
+++ b/src/executor/test.rs
@@ -0,0 +1,80 @@
+#![allow(unused_imports)]
+use futures::future;
+use std::error::Error;
+
+use bollard::models::TaskSpecContainerSpec;
+
+use crate::executor::{spawner::ExecutorSpawner, utils::get_specs_from_compose, MockSpawner};
+use crate::json_mst::JsonEntry;
+use summa_backend::merkle_sum_tree::utils::parse_csv_to_entries;
+
+#[test]
+fn test_util_get_specs_from_compose() {
+ // Parse network options and service spec for the "mini_tree" service
+ // from the repository-level docker-compose.yml.
+ let (network_options, service_spec) =
+ get_specs_from_compose("mini_tree", "docker-compose.yml").unwrap();
+
+ let service_name = "mini_tree";
+ // The network is named after the service and uses the swarm "overlay" driver
+ // (the default applied by `get_specs_from_compose`).
+ assert_eq!(network_options.name, service_name);
+ assert_eq!(network_options.driver, "overlay");
+
+ // The service spec must carry a name, mode, task template and endpoint spec.
+ assert_eq!(service_spec.name.unwrap(), service_name);
+ assert!(service_spec.mode.is_some());
+ assert!(service_spec.task_template.is_some());
+ assert!(service_spec.endpoint_spec.is_some());
+ // The container spec should reference the mini-tree image and nothing else.
+ assert_eq!(
+ service_spec.task_template.unwrap().container_spec.unwrap(),
+ TaskSpecContainerSpec {
+ image: Some("summadev/summa-aggregation-mini-tree:latest".to_string()),
+ ..Default::default()
+ }
+ );
+}
+
+#[tokio::test]
+async fn test_executor() -> Result<(), Box> {
+ // Spawn a single mock executor/worker pair.
+ let spawner = MockSpawner::new(None);
+ let executor = spawner.spawn_executor().await;
+
+ // Parse 16 entries from CSV and convert them to the JSON wire format
+ // that the worker accepts.
+ let (_, entries) = parse_csv_to_entries::<_, 2, 14>("csv/entry_16.csv").unwrap();
+ let json_entries = entries
+ .iter()
+ .map(JsonEntry::from_entry)
+ .collect::>();
+ let merkle_sum_tree = executor.generate_tree::<2, 14>(json_entries).await.unwrap();
+
+ // Tear the worker down before asserting on the result.
+ spawner.terminate_executors().await;
+
+ // "dxGaEAii" is expected at index 0 of entry_16.csv.
+ assert_eq!(merkle_sum_tree.index_of_username("dxGaEAii").unwrap(), 0);
+ Ok(())
+}
+
+#[tokio::test]
+async fn test_executor_block() -> Result<(), Box> {
+ // A single executor should be able to serve two tree-generation
+ // requests awaited concurrently.
+ let spawner = MockSpawner::new(None);
+ let executor = spawner.spawn_executor().await;
+
+ // Parse two csv files
+ let (_, entries_1) = parse_csv_to_entries::<_, 2, 14>("csv/entry_16.csv").unwrap();
+ let (_, entries_2) = parse_csv_to_entries::<_, 2, 14>("csv/entry_16.csv").unwrap();
+
+ // Convert entries to json_entries
+ let json_entries_1 = entries_1
+ .iter()
+ .map(JsonEntry::from_entry)
+ .collect::>();
+ let json_entries_2 = entries_2
+ .iter()
+ .map(JsonEntry::from_entry)
+ .collect::>();
+
+ // Futures are created without awaiting, then driven concurrently below.
+ let merkle_tree_1 = executor.generate_tree::<2, 14>(json_entries_1);
+ let merkle_tree_2 = executor.generate_tree::<2, 14>(json_entries_2);
+
+ let all_tree = future::join_all([merkle_tree_1, merkle_tree_2]).await;
+
+ spawner.terminate_executors().await;
+
+ // Both requests must have produced a result.
+ assert_eq!(all_tree.len(), 2);
+
+ Ok(())
+}
diff --git a/src/executor/utils.rs b/src/executor/utils.rs
new file mode 100644
index 0000000..ecc0482
--- /dev/null
+++ b/src/executor/utils.rs
@@ -0,0 +1,159 @@
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use std::error::Error;
+
+use bollard::models::{
+ NetworkAttachmentConfig, ServiceSpec, ServiceSpecMode, ServiceSpecModeReplicated, TaskSpec,
+ TaskSpecContainerSpec, TaskSpecPlacement,
+};
+use bollard::network::CreateNetworkOptions;
+use bollard::service::{EndpointPortConfig, EndpointPortConfigPublishModeEnum, EndpointSpec};
+
+/// Minimal mirror of a docker-compose.yml file: only the fields that
+/// `get_specs_from_compose` reads are modeled.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct DockerCompose {
+ // Top-level compose `version` string.
+ pub version: String,
+ // Service definitions keyed by service name.
+ pub services: HashMap,
+ // Optional network definitions keyed by network name.
+ pub networks: Option>,
+}
+
+/// One service entry of the compose file.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct Service {
+ // Image reference, e.g. "summadev/summa-aggregation-mini-tree:latest".
+ pub image: String,
+ // Long-syntax port mappings; required by `get_specs_from_compose`.
+ pub ports: Option>,
+ // Swarm deploy section; required by `get_specs_from_compose`.
+ pub deploy: Option,
+ // Names of networks the service attaches to.
+ pub networks: Option>,
+}
+
+/// Long-syntax port mapping of a compose service.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct Port {
+ // Container-side port (compose long-syntax `target`).
+ pub target: i64,
+ // Host-side port (compose long-syntax `published`).
+ pub published: i64,
+ // NOTE(review): the compose long syntax names this key `mode`; a compose
+ // file using `mode:` will leave this field `None` — confirm the key name.
+ pub published_mode: Option,
+}
+
+/// `deploy` section of a compose service (swarm settings).
+#[derive(Debug, Serialize, Deserialize)]
+pub struct Deploy {
+ // Deployment mode (e.g. "replicated"); not read by `get_specs_from_compose`.
+ pub mode: Option,
+ // Placement constraints; required by `get_specs_from_compose`.
+ pub placement: Option,
+ // Replica count; required by `get_specs_from_compose`.
+ pub replicas: Option,
+}
+
+/// `deploy.placement` section holding swarm placement constraints.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct Placement {
+ // Constraint strings such as "node.role==worker".
+ pub constraints: Option>,
+}
+
+/// A network definition; only the driver is read (defaults to "overlay").
+#[derive(Debug, Serialize, Deserialize)]
+pub struct Network {
+ driver: Option,
+}
+
+// This helper function return `CreateNetworkOptions` and `ServiceSpec` from `docker-compose.yml`
+pub fn get_specs_from_compose(
+ service_name: &str,
+ file_path: &str,
+) -> Result<(CreateNetworkOptions, ServiceSpec), Box> {
+ let file_content = std::fs::read_to_string(file_path).expect("Unable to read file");
+ let compose = serde_yaml::from_str::(&file_content)?;
+
+ // Declare docker client & default labels
+ let mut labels = HashMap::new();
+ labels.insert(
+ String::from("Summa"),
+ String::from("Dummy key / value for bollard"),
+ );
+
+ // Retrieve network options from docker-compose.yml
+ let mut network_options = CreateNetworkOptions::::default();
+ compose
+ .networks
+ .ok_or("There is no network configuration")
+ .unwrap()
+ .iter()
+ .for_each(|(network_name, network)| {
+ if network_name == service_name {
+ network_options.name = network_name.to_string();
+ network_options.driver = network.driver.clone().unwrap_or("overlay".to_string());
+ network_options.labels = labels.clone();
+ }
+ });
+
+ if network_options.name.is_empty() {
+ return Err(format!(
+ "Network name is empty, It may not exist network name: '{}' in docker-compose file",
+ service_name
+ )
+ .into());
+ }
+
+ // Retrieve service spec from docker-compose.yml
+ let service_spec = match compose.services.get(service_name) {
+ Some(service) => {
+ // Parse these variables from docker-compose.yml
+ let ports = service.ports.as_ref().ok_or("There is no 'ports' field")?;
+ let endpoint_port_config = ports
+ .iter()
+ .map(|port| EndpointPortConfig {
+ target_port: Some(port.target),
+ published_port: Some(port.published),
+ publish_mode: Some(EndpointPortConfigPublishModeEnum::INGRESS),
+ ..Default::default()
+ })
+ .collect::>();
+
+ let deploy = service
+ .deploy
+ .as_ref()
+ .ok_or("There is no 'deploy' field")?;
+ let parsed_replicas = deploy
+ .replicas
+ .ok_or("There is no 'replicas' under 'deploy' field")?;
+ let parsed_contraint = deploy
+ .placement
+ .as_ref()
+ .ok_or("There is no 'placement' field")?
+ .constraints
+ .as_ref()
+ .ok_or("There is no 'constraints' under 'placement' field")?;
+
+ ServiceSpec {
+ name: Some(String::from(service_name)),
+ mode: Some(ServiceSpecMode {
+ replicated: Some(ServiceSpecModeReplicated {
+ replicas: Some(parsed_replicas),
+ }),
+ ..Default::default()
+ }),
+ task_template: Some(TaskSpec {
+ placement: Some(TaskSpecPlacement {
+ constraints: Some(parsed_contraint.to_owned()),
+ ..Default::default()
+ }),
+ container_spec: Some(TaskSpecContainerSpec {
+ image: Some(service.image.clone()),
+ ..Default::default()
+ }),
+ ..Default::default()
+ }),
+ endpoint_spec: Some(EndpointSpec {
+ mode: None,
+ ports: Some(endpoint_port_config),
+ }),
+ networks: Some(vec![NetworkAttachmentConfig {
+ target: Some(service_name.to_string()),
+ ..Default::default()
+ }]),
+ ..Default::default()
+ }
+ }
+ None => {
+ return Err(format!(
+ "Service name: '{}' not found in docker-compose file",
+ service_name
+ )
+ .into())
+ }
+ };
+
+ Ok((network_options, service_spec))
+}
diff --git a/src/json_mst.rs b/src/json_mst.rs
new file mode 100644
index 0000000..82f4790
--- /dev/null
+++ b/src/json_mst.rs
@@ -0,0 +1,175 @@
+use num_bigint::BigUint;
+use serde::{Deserialize, Serialize};
+use std::error::Error;
+
+use halo2_proofs::halo2curves::{bn256::Fr as Fp, group::ff::PrimeField};
+
+use summa_backend::merkle_sum_tree::{Cryptocurrency, Entry, MerkleSumTree, Node, Tree};
+
+/// JsonEntry
+/// Represents an entry in the Merkle Sum Tree in JSON format.
+/// The balance in the Merkle Sum Tree is a `BigUint`; in the JSON format it is presented as a string.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct JsonEntry {
+ pub username: String,
+ // Balances serialized as decimal strings (see `JsonEntry::from_entry`).
+ pub balances: Vec,
+}
+
+/// JsonNode
+/// Represents a node in the Merkle Sum Tree in JSON format.
+/// The balance in the Merkle Sum Tree is a field element; in the JSON format it is presented as a string.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct JsonNode {
+ // Node hash as a "0x"-prefixed hex string (produced via `format!("{:?}", …)`).
+ pub hash: String,
+ // Balances as "0x"-prefixed hex strings, parsed back by `parse_fp_from_hex`.
+ pub balances: Vec,
+}
+
+/// JsonMerkleSumTree
+/// Represents the entire Merkle Sum Tree in JSON format.
+/// It is used for transmitting tree data between the executor and mini-tree-server.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct JsonMerkleSumTree {
+ // Root node of the tree.
+ pub root: JsonNode,
+ // All tree nodes; nested per tree level as produced by `from_tree`.
+ pub nodes: Vec>,
+ pub depth: usize,
+ // Leaf entries in tree order.
+ pub entries: Vec,
+ // Whether the entries were sorted when the tree was built.
+ pub is_sorted: bool,
+}
+
+/// Converts a `Node` into its JSON representation by formatting the hash and
+/// each balance with `{:?}` (hex form of the field element).
+pub fn convert_node_to_json(node: &Node) -> JsonNode {
+ JsonNode {
+ hash: format!("{:?}", node.hash),
+ balances: node.balances.iter().map(|b| format!("{:?}", b)).collect(),
+ }
+}
+
+/// Parses a "0x"-prefixed hexadecimal string into a field element.
+///
+/// The hex digits are routed through `BigUint` and a base-10 string because
+/// `Fp::from_str_vartime` accepts decimal input.
+///
+/// Panics if the string contains non-hex digits (invariant: inputs are
+/// produced by `convert_node_to_json`, i.e. `format!("{:?}", …)` output).
+fn parse_fp_from_hex(hex_str: &str) -> Fp {
+    // Fix: tolerate a missing "0x" prefix. The previous `&hex_str[2..]`
+    // silently dropped the first two hex digits of unprefixed input and
+    // panicked on strings shorter than two bytes.
+    let digits = hex_str.strip_prefix("0x").unwrap_or(hex_str);
+    let bigint = BigUint::parse_bytes(digits.as_bytes(), 16)
+        .expect("hash/balance must be a hexadecimal string");
+    Fp::from_str_vartime(&bigint.to_str_radix(10)).unwrap()
+}
+
+impl JsonEntry {
+ /// Constructs a `JsonEntry` from an owned username and decimal balance strings.
+ pub fn new(username: String, balances: Vec) -> Self {
+ JsonEntry { username, balances }
+ }
+
+ /// Converts an `Entry` to a `JsonEntry`.
+ ///
+ /// This method translates an `Entry` into its JSON format.
+ /// It is used by the Executor to send Entry data to the mini-tree-server in JSON format.
+ pub fn from_entry(entry: &Entry) -> Self {
+ JsonEntry::new(
+ entry.username().to_string(),
+ entry
+ .balances()
+ .iter()
+ .map(|balance| balance.to_string())
+ .collect(),
+ )
+ }
+
+ /// Converts a `JsonEntry` back to an `Entry`.
+ ///
+ /// This method is utilized by the mini-tree-server when processing data received from the executor in JSON format.
+ /// It converts `JsonEntry` objects back to the `Entry` struct, facilitating the construction of the Merkle Sum Tree.
+ ///
+ /// Panics if a balance string is not a valid decimal integer, or if
+ /// `Entry::new` rejects the input. NOTE(review): if `self.balances` holds
+ /// more elements than the fixed-size array, the indexed write panics —
+ /// confirm inputs are always length-checked upstream.
+ pub fn to_entry(&self) -> Entry {
+ let mut balances: [BigUint; N_CURRENCIES] = std::array::from_fn(|_| BigUint::from(0u32));
+ self.balances.iter().enumerate().for_each(|(i, balance)| {
+ balances[i] = balance.parse::().unwrap();
+ });
+
+ Entry::::new(self.username.clone(), balances).unwrap()
+ }
+}
+
+/// Converts a `JsonNode` back to a `Node` for reconstructing the Merkle Sum Tree from JSON data.
+impl JsonNode {
+ /// Parses the hex-string hash and balances back into field elements.
+ ///
+ /// Panics with "Incorrect number of balances" when the balance count does
+ /// not match the expected array length, or if any string is not valid hex.
+ pub fn to_node(&self) -> Node {
+ let hash = parse_fp_from_hex(&self.hash);
+ let balances = self
+ .balances
+ .iter()
+ .map(|balance| parse_fp_from_hex(balance))
+ .collect::>()
+ .try_into()
+ .expect("Incorrect number of balances");
+
+ Node { hash, balances }
+ }
+}
+
+impl JsonMerkleSumTree {
+ /// Converts a MerkleSumTree to its JSON representation.
+ ///
+ /// This function is essential for the mini-tree-server to send the Merkle Sum Tree results back to the executor in JSON format,
+ /// facilitating the translation of the tree structure into a universally readable JSON form.
+ pub fn from_tree(
+ tree: MerkleSumTree,
+ ) -> Self {
+ let root = convert_node_to_json(tree.root());
+ // Convert every node, level by level.
+ let nodes = tree
+ .nodes()
+ .iter()
+ .map(|node| node.iter().map(convert_node_to_json).collect())
+ .collect();
+ let entries = tree
+ .entries()
+ .iter()
+ .map(|entry| {
+ JsonEntry::new(
+ entry.username().to_string(),
+ entry.balances().iter().map(|b| b.to_string()).collect(),
+ )
+ })
+ .collect();
+
+ JsonMerkleSumTree {
+ root,
+ nodes,
+ depth: *tree.depth(),
+ entries,
+ // NOTE(review): sortedness of the source tree is not preserved; this
+ // is always serialized as false — confirm this is intended.
+ is_sorted: false,
+ }
+ }
+
+ /// Converts a JsonMerkleSumTree back to a MerkleSumTree.
+ ///
+ /// This function is crucial when handling data received in JSON format from the mini-tree-server.
+ /// It rebuilds the MerkleSumTree on the main machine using the `from_params` method.
+ /// This method is preferred over `from_entries` as the nodes are pre-computed by the mini-tree-server, thus the tree doesn't need to be recomputed from scratch.
+ pub fn to_mst(
+ &self,
+ ) -> Result, Box>
+ where
+ [usize; N_CURRENCIES + 1]: Sized,
+ [usize; N_CURRENCIES + 2]: Sized,
+ {
+ let root: Node = self.root.to_node::();
+ let nodes = self
+ .nodes
+ .iter()
+ .map(|node| node.iter().map(|n| n.to_node()).collect())
+ .collect();
+ let entries = self
+ .entries
+ .iter()
+ .map(|entry| entry.to_entry::())
+ .collect();
+ // Placeholder metadata; the actual currency names are not transmitted
+ // over the wire.
+ let cryptocurrencies = vec![
+ Cryptocurrency {
+ name: "Dummy".to_string(),
+ chain: "ETH".to_string(),
+ };
+ N_CURRENCIES
+ ];
+
+ MerkleSumTree::::from_params(
+ root,
+ nodes,
+ self.depth,
+ entries,
+ cryptocurrencies,
+ self.is_sorted,
+ )
+ }
+}
diff --git a/src/lib.rs b/src/lib.rs
index cc00c82..bd2a152 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,3 +1,6 @@
#![feature(generic_const_exprs)]
-
pub mod aggregation_merkle_sum_tree;
+pub mod executor;
+pub mod json_mst;
+pub mod mini_tree_generator;
+pub mod orchestrator;
diff --git a/src/mini_tree_generator.rs b/src/mini_tree_generator.rs
new file mode 100644
index 0000000..bfb84df
--- /dev/null
+++ b/src/mini_tree_generator.rs
@@ -0,0 +1,61 @@
+use axum::{extract::Json, http::StatusCode, response::IntoResponse};
+use const_env::from_env;
+
+use crate::json_mst::{JsonEntry, JsonMerkleSumTree};
+use summa_backend::merkle_sum_tree::{Cryptocurrency, Entry, MerkleSumTree};
+
+/// Mini Tree Generator is designed to create Merkle Sum Tree using the Axum web framework.
+/// It primarily handles HTTP requests to generate tree based on provided JSON entries.
+///
+/// Constants:
+/// - `N_CURRENCIES`: The number of cryptocurrencies involved. Set via environment variables.
+/// - `N_BYTES`: The byte size for each entry. Set via environment variables.
+///
+/// Functions:
+/// - `create_mst`: An asynchronous function that processes incoming JSON requests to generate a Merkle Sum Tree.
+///   It converts `JsonEntry` objects into `Entry` instances and then constructs the `MerkleSumTree`.
+///   The function handles the conversion of the `MerkleSumTree` into a JSON format (`JsonMerkleSumTree`) for the response.
+///
+// Defaults to 2; overridden by an `N_CURRENCIES` environment variable via the
+// `const_env` crate — presumably resolved at build time, TODO confirm.
+#[from_env]
+const N_CURRENCIES: usize = 2;
+// Defaults to 14; overridden by an `N_BYTES` environment variable (see above).
+#[from_env]
+const N_BYTES: usize = 14;
+
+/// Axum handler that builds a Merkle Sum Tree from the posted JSON entries
+/// and returns it serialized as a `JsonMerkleSumTree` with `200 OK`.
+///
+/// NOTE(review): `from_entries(..).unwrap()` panics the handler task on
+/// invalid entries instead of returning the error tuple — confirm intended.
+pub async fn create_mst(
+ Json(json_entries): Json>,
+) -> Result)> {
+ // Convert `JsonEntry` -> `Entry`
+ let entries = json_entries
+ .iter()
+ .map(|json_entry| json_entry.to_entry())
+ .collect::>>();
+ // Fix: renamed from the misspelled `crypcocurrencies`; placeholder
+ // metadata matching the values used in `json_mst.rs`.
+ let cryptocurrencies = vec![
+ Cryptocurrency {
+ name: "DUMMY".to_string(),
+ chain: "ETH".to_string(),
+ };
+ N_CURRENCIES
+ ];
+
+ // Timing instrumentation is compiled out of test builds.
+ #[cfg(not(test))]
+ let entries_length = entries.len();
+ #[cfg(not(test))]
+ let starting_time = std::time::Instant::now();
+
+ // Create `MerkleSumTree` from `parsed_entries`
+ let tree =
+ MerkleSumTree::::from_entries(entries, cryptocurrencies, false)
+ .unwrap();
+
+ #[cfg(not(test))]
+ println!(
+ "Time to create tree({} entries): {}ms",
+ entries_length,
+ starting_time.elapsed().as_millis()
+ );
+
+ // Convert `MerkleSumTree` to `JsonMerkleSumTree`
+ let json_tree = JsonMerkleSumTree::from_tree(tree);
+
+ Ok((StatusCode::OK, Json(json_tree)))
+}
diff --git a/src/orchestrator/mod.rs b/src/orchestrator/mod.rs
new file mode 100644
index 0000000..d2e9b05
--- /dev/null
+++ b/src/orchestrator/mod.rs
@@ -0,0 +1,258 @@
+mod test;
+
+use futures::future::join_all;
+use std::{cmp::min, error::Error};
+use summa_backend::merkle_sum_tree::{utils::parse_csv_to_entries, Cryptocurrency, MerkleSumTree};
+use tokio::sync::mpsc;
+use tokio_util::sync::CancellationToken;
+
+use crate::aggregation_merkle_sum_tree::AggregationMerkleSumTree;
+use crate::executor::ExecutorSpawner;
+use crate::json_mst::JsonEntry;
+
+/// The Orchestrator in Summa Aggregation
+///
+/// It serves as the central management component, coordinating data processing activities
+/// between Executors and Workers, thereby improving the efficiency of building the Merkle sum tree.
+///
+/// Functions include dynamically spawning Executors, managing task distribution,
+/// handling errors and pipeline control, and building the `AggregationMerkleSumTree`
+/// by aggregating mini-trees constructed by the Workers.
+pub struct Orchestrator {
+ // Spawns and terminates the executors that delegate work to Workers.
+ executor_spawner: Box,
+ // Paths of the entry CSV files; each file yields one mini-tree.
+ entry_csvs: Vec,
+}
+
+impl Orchestrator {
+ /// Creates an Orchestrator from a spawner and the list of entry CSV paths.
+ pub fn new(executor_spawner: Box, entry_csvs: Vec) -> Self {
+ Self {
+ executor_spawner,
+ entry_csvs,
+ }
+ }
+
+ /// Calculate the range of tasks to be assigned to an executor.
+ ///
+ /// Parameters:
+ /// * `executor_index` - The index of the executor.
+ /// * `total_executors` - The total number of executors.
+ ///
+ /// Returns:
+ /// (start, end)
+ /// A tuple representing the start and end indices of the tasks assigned to the executor
+ ///
+ /// The first value in the tuple, the `start` index, indicates the beginning of the task range for the executor,
+ /// while the second value, the `end` index, specifies the end of the range (exclusive).
+ ///
+ /// This calculation divides the total number of tasks by the number of executors to distribute tasks evenly.
+ /// For instance, if there are 5 tasks and 2 executors, the tasks will be split as follows:
+ /// Executor_1: [1, 2, 3] (start index 0, end index 3)
+ /// Executor_2: [4, 5] (start index 3, end index 5)
+ fn calculate_task_range(
+ &self,
+ executor_index: usize,
+ total_executors: usize,
+ ) -> (usize, usize) {
+ let total_tasks = self.entry_csvs.len();
+ let base_tasks_per_executor = total_tasks / total_executors;
+ // The first `extra_tasks` executors receive one additional task each.
+ let extra_tasks = total_tasks % total_executors;
+
+ let start = executor_index * base_tasks_per_executor + min(executor_index, extra_tasks);
+ let end =
+ (executor_index + 1) * base_tasks_per_executor + min(executor_index + 1, extra_tasks);
+
+ (start, min(end, total_tasks))
+ }
+
+ /// Processes a list of CSV files concurrently using executors and aggregates the results.
+ /// This involves splitting the CSV files based on available executors, distributing tasks,
+ /// and aggregating the results into an `AggregationMerkleSumTree`.
+ ///
+ /// * `executor_count` - The number of executors to use.
+ ///
+ /// Note: After processing, executors are terminated to release resources.
+ ///
+ /// Data flow
+ ///
+ /// 1. Splits the list of CSV files into segments based on the number of available executors.
+ /// 2. A distribution thread loads each CSV file, parses it into `entries`, and sends these to `entries_tx`.
+ /// 3. Each executor receives `entries` from `entries_rx`, requests tasks to Worker, and sends results back through `tree_tx`.
+ /// 4. The processed data from all executors, collected from `tree_rx`, is aggregated into an `AggregationMerkleSumTree`.
+ /// 5. After processing, executors are terminated to release resources.
+ ///
+ pub async fn create_aggregation_mst(
+ self,
+ executor_count: usize,
+ ) -> Result, Box>
+ where
+ [usize; N_CURRENCIES + 1]: Sized,
+ [usize; N_CURRENCIES + 2]: Sized,
+ {
+ let mut executors = Vec::new();
+ let mut result_collectors = Vec::new();
+
+ // Channel capacity is tunable through the CHANNEL_SIZE env var (default 32).
+ let channel_size = std::env::var("CHANNEL_SIZE")
+ .unwrap_or_default()
+ .parse::()
+ .unwrap_or(32);
+
+ let cancel_token = CancellationToken::new();
+ // Never spawn more workers than there are CSV files to process.
+ let actual_number_of_workers = min(executor_count, self.entry_csvs.len());
+ for i in 0..actual_number_of_workers {
+ // Declare channels for communication
+ //
+ // Two channels are used in this method:
+ //
+ // - A `entries_tx` receives parsed data from the entry parser to distribute tasks to executors.
+ // - A `tree_tx` channel is used by the executors to send the results of the tasks.
+ //
+ let (entries_tx, mut entries_rx) = mpsc::channel(channel_size);
+ let (tree_tx, tree_rx) = mpsc::channel(channel_size);
+ // Executor
+ //
+ // Spawn executors that process entries with Worker.
+ //
+ // - Receives 'entries' from [entries_rx] channel.
+ // - Processes 'entries' to build a merkle sum tree (done by worker).
+ // - Sends the resulting 'tree' back via [tree_tx] channel.
+ //
+ let executor = self.executor_spawner.spawn_executor().await;
+ result_collectors.push((i, tree_rx));
+
+ let cloned_cancel_token = cancel_token.clone();
+ executors.push(tokio::spawn(async move {
+ loop {
+ tokio::select! {
+ entries_data = entries_rx.recv() => {
+ // When the distribution thread is finished, the channel will be closed.
+ let entries = match entries_data {
+ Some(entries) => entries,
+ None => break,
+ };
+ let processed_task = match executor.generate_tree::(entries).await {
+ Ok(entries) => entries,
+ Err(e) => {
+ eprintln!("Executor_{:?}: error while processing entries {:?}", i, e);
+ cloned_cancel_token.cancel();
+ break;
+ }
+ };
+ if tree_tx.send(processed_task).await.is_err() {
+ eprintln!("Executor_{:?}: Error while sending tree result", i);
+ cloned_cancel_token.cancel();
+ break;
+ }
+ },
+ _ = cloned_cancel_token.cancelled() => {
+ eprintln!("Executor_{:?}: cancel signal received, terminating.", i);
+ break;
+ },
+ }
+ }
+ }));
+
+ // Distributing Tasks
+ //
+ // Spawn a distribution thread that distributes entries to executors
+ //
+ // - Loads CSV file from [csv_file_path].
+ // - Parses CSV file into 'entries'.
+ // - Sends 'entries' to executors via [entries_tx] channel.
+ //
+ let (start, end) = self.calculate_task_range(i, executor_count);
+ let entry_csvs_slice = self.entry_csvs[start..end].to_vec(); // Clone only the necessary slice
+
+ let cloned_cancel_token = cancel_token.clone();
+ tokio::spawn(async move {
+ for file_path in entry_csvs_slice.iter() {
+ let entries = match parse_csv_to_entries::<_, N_CURRENCIES, N_BYTES>(file_path)
+ {
+ Ok((_, entries)) => entries
+ .iter()
+ .map(JsonEntry::from_entry)
+ .collect::>(),
+ Err(e) => {
+ eprintln!(
+ "Executor_{:?}: Error while processing file {:?}: {:?}",
+ i, file_path, e
+ );
+ cloned_cancel_token.cancel();
+ break;
+ }
+ };
+
+ tokio::select! {
+ _ = cloned_cancel_token.cancelled() => {
+ eprintln!("Executor_{:?}: cancel signal received, terminating distributor.", i);
+ break;
+ },
+ send_entries = entries_tx.send(entries) => {
+ if let Err(e) = send_entries {
+ eprintln!("Executor_{:?}: Error while sending entries: {:?}", i, e);
+ cloned_cancel_token.cancel();
+ break;
+ }
+ }
+ }
+ }
+ // Closing the channel signals the executor loop to exit.
+ drop(entries_tx);
+ });
+ }
+
+ // Collecting Results
+ //
+ // Collect `tree` results from executors
+ //
+ // - Receives processed 'tree' from [tree_rx] channel.
+ // - Collects all 'tree' results into 'all_tree_results'.
+ // - Aggregates 'all_tree_results' into 'ordered_tree_results'.
+ //
+ let mut all_tree_responses = Vec::new();
+ for (index, mut tree_rx) in result_collectors {
+ let executor_results = tokio::spawn(async move {
+ let mut trees = Vec::new();
+ while let Some(result) = tree_rx.recv().await {
+ trees.push(result);
+ }
+ (index, trees)
+ });
+ all_tree_responses.push(executor_results);
+ }
+
+ let all_tree_results = join_all(all_tree_responses).await;
+
+ // Aggregate results from all workers in order
+ let mut ordered_tree_results = vec![None; self.entry_csvs.len()];
+ for result in all_tree_results {
+ let (index, worker_results) = result.unwrap();
+ // Bug fix: place each executor's trees at the offset that
+ // `calculate_task_range` actually assigned to it. The previous
+ // `index * (entry_csvs.len() / executor_count)` offset misplaced
+ // (and could overwrite) results whenever the CSV count was not an
+ // exact multiple of `executor_count`.
+ let (start, _) = self.calculate_task_range(index, executor_count);
+ for (i, res) in worker_results.iter().enumerate() {
+ ordered_tree_results[start + i] = Some(res.clone());
+ }
+ }
+
+ // Terminate executors
+ self.executor_spawner.terminate_executors().await;
+
+ let all_merkle_sum_tree: Vec> =
+ ordered_tree_results.into_iter().flatten().collect();
+
+ // Occur error if the number of mini_tree in 'all_merkle_sum_tree' is not equal to the number of entry_csvs.
+ if all_merkle_sum_tree.len() != self.entry_csvs.len() {
+ return Err("Mismatch in generated mini tree counts and given CSV counts".into());
+ }
+
+ AggregationMerkleSumTree::new(
+ all_merkle_sum_tree,
+ vec![
+ Cryptocurrency {
+ name: "DUMMY".to_string(),
+ chain: "ETH".to_string(),
+ };
+ N_CURRENCIES
+ ],
+ )
+ }
+}
diff --git a/src/orchestrator/test.rs b/src/orchestrator/test.rs
new file mode 100644
index 0000000..aff5cca
--- /dev/null
+++ b/src/orchestrator/test.rs
@@ -0,0 +1,106 @@
+#![allow(unused_imports)]
+use crate::executor::{CloudSpawner, LocalSpawner, MockSpawner};
+use crate::orchestrator::Orchestrator;
+use summa_backend::merkle_sum_tree::Tree;
+
+#[tokio::test]
+async fn test_single_mock_worker() {
+ // A single executor must process both CSV files sequentially.
+ let spawner = MockSpawner::new(None);
+
+ let orchestrator = Orchestrator::<2, 14>::new(
+ Box::new(spawner),
+ vec![
+ "csv/entry_16_1.csv".to_string(),
+ "csv/entry_16_2.csv".to_string(),
+ ],
+ );
+ let aggregation_merkle_sum_tree = orchestrator.create_aggregation_mst(1).await.unwrap();
+
+ // Each CSV yields one mini-tree with 16 entries.
+ assert_eq!(16, aggregation_merkle_sum_tree.mini_tree(0).entries().len());
+ assert_eq!(16, aggregation_merkle_sum_tree.mini_tree(1).entries().len());
+}
+
+#[tokio::test]
+async fn test_none_exist_csv() {
+ // A missing CSV file cancels the pipeline; the orchestrator surfaces this
+ // as a mini-tree/CSV count mismatch error.
+ let spawner = MockSpawner::new(None);
+ let orchestrator = Orchestrator::<2, 14>::new(
+ Box::new(spawner),
+ vec![
+ "csv/entry_16.csv".to_string(),
+ "csv/no_exist.csv".to_string(),
+ ],
+ );
+ match orchestrator.create_aggregation_mst(2).await {
+ Ok(_) => panic!("Expected an error"),
+ Err(e) => {
+ assert!(e
+ .to_string()
+ .contains("Mismatch in generated mini tree counts and given CSV counts"));
+ }
+ }
+}
+
+#[tokio::test]
+async fn test_none_exist_worker() {
+ // Point the spawner at an unreachable worker address; every tree request
+ // fails and the orchestrator reports a count mismatch.
+ let non_exist_worker_url = vec!["127.0.0.1:40".to_string()]; // unassignable port
+ let spawner = MockSpawner::new(Some(non_exist_worker_url));
+
+ let orchestrator = Orchestrator::<2, 14>::new(
+ Box::new(spawner),
+ vec![
+ "csv/entry_16_1.csv".to_string(),
+ "csv/entry_16_2.csv".to_string(),
+ ],
+ );
+
+ match orchestrator.create_aggregation_mst(2).await {
+ Ok(_) => panic!("Expected an error"),
+ Err(e) => {
+ assert!(e
+ .to_string()
+ .contains("Mismatch in generated mini tree counts and given CSV counts"));
+ }
+ }
+}
+
+// Fix: restore the feature gate (it was commented out). This test needs a
+// local Docker daemon plus the mini-tree image, which CI provides via
+// `cargo test --features docker`; ungated it fails on machines without Docker.
+#[cfg(feature = "docker")]
+#[tokio::test]
+async fn test_with_containers() {
+ // Spawn real mini-tree containers locally and run the orchestrator end to end.
+ let spawner = LocalSpawner::new(
+ "summadev/summa-aggregation-mini-tree:latest".to_string(),
+ "orchestrator_test".to_string(),
+ );
+
+ let orchestrator = Orchestrator::<2, 14>::new(
+ Box::new(spawner),
+ vec![
+ "csv/entry_16_1.csv".to_string(),
+ "csv/entry_16_2.csv".to_string(),
+ ],
+ );
+ let aggregation_merkle_sum_tree = orchestrator.create_aggregation_mst(2).await.unwrap();
+
+ // Each CSV yields one mini-tree with 16 entries.
+ assert_eq!(16, aggregation_merkle_sum_tree.mini_tree(0).entries().len());
+ assert_eq!(16, aggregation_merkle_sum_tree.mini_tree(1).entries().len());
+}
+
+#[cfg(feature = "docker-swarm")]
+#[tokio::test]
+async fn test_with_swarm_service() {
+ // Requires a Docker swarm with nodes reachable at the listed addresses;
+ // gated behind the "docker-swarm" feature.
+ let spawner = CloudSpawner::new(
+ Some(("mini_tree".to_string(), "docker-compose.yml".to_string())),
+ vec!["10.0.0.1".to_string(), "10.0.0.2".to_string()],
+ 4000,
+ );
+
+ let orchestrator = Orchestrator::<2, 14>::new(
+ Box::new(spawner),
+ vec![
+ "csv/entry_16_1.csv".to_string(),
+ "csv/entry_16_2.csv".to_string(),
+ ],
+ );
+ let aggregation_merkle_sum_tree = orchestrator.create_aggregation_mst(2).await.unwrap();
+ // Each CSV yields one mini-tree with 16 entries.
+ assert_eq!(16, aggregation_merkle_sum_tree.mini_tree(0).entries().len());
+ assert_eq!(16, aggregation_merkle_sum_tree.mini_tree(1).entries().len());
+}