diff --git a/Cargo.lock b/Cargo.lock index ecd62477..078e6bdd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -54,9 +54,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.8" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "once_cell", @@ -66,9 +66,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -102,9 +102,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.11" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", @@ -150,9 +150,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" [[package]] name = "arbitrary" @@ -194,18 +194,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -217,7 +217,7 @@ dependencies = [ "attribute-derive-macro", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -233,14 +233,14 @@ dependencies = [ "proc-macro2", "quote", "quote-use", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "axum" @@ -289,9 +289,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", @@ -322,7 +322,7 @@ checksum = "ea4587af76b177449245b3d59e8a94f579f02155c123452bb22d5faa4083bb93" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -357,15 +357,15 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" [[package]] name = "blake3" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" +checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52" dependencies = [ "arrayref", "arrayvec", @@ -406,9 +406,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" [[package]] name = "byte-tools" @@ -424,9 +424,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "bytesize" @@ -454,12 +454,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" -dependencies = [ - "libc", -] +checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" [[package]] name = "cfg-if" @@ -478,7 +475,7 @@ dependencies = [ "js-sys", "num-traits", "wasm-bindgen", - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -520,9 +517,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.0" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80c21025abd42669a92efc996ef13cfb2c5c627858421ea58d5c3b331a6c134f" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -530,9 +527,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.0" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458bf1f341769dfcf849846f65dffdf9146daa56bcd2a47cb4e1de9915567c99" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", "anstyle", @@ -543,23 +540,23 @@ dependencies = [ [[package]] name = "clap_complete" -version = "4.5.0" +version = "4.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "299353be8209bd133b049bf1c63582d184a8b39fd9c04f15fe65f50f88bdfe6c" +checksum = "885e4d7d5af40bfb99ae6f9433e292feac98d452dcb3ec3d25dfe7552b77da8c" dependencies = [ "clap", ] [[package]] name = "clap_derive" -version = "4.5.0" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -642,6 +639,26 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "const_format" +version = "0.2.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e3a214c7af3d04997541b18d432afaff4c455e79e2029079647e72fc2bd27673" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + [[package]] name = "constant_time_eq" version = "0.3.0" @@ -710,9 +727,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" dependencies = [ "crossbeam-utils", ] @@ -748,7 +765,7 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "crossterm_winapi", "libc", "mio", @@ -810,7 +827,7 @@ checksum = "62d671cc41a825ebabc75757b62d3d168c577f9149b2d49ece1dad1f72119d25" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -821,16 +838,16 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] name = "derive_tasm_object" version = "0.1.0" -source = "git+https://github.com/TritonVM/tasm-lib.git?rev=928b1fa7522aacdb055c4c04d144b44af241f2e6#928b1fa7522aacdb055c4c04d144b44af241f2e6" +source = "git+https://github.com/dan-da/tasm-lib.git?rev=15a708cf6fcbd9ed3e65e5e2067be6e622176328#15a708cf6fcbd9ed3e65e5e2067be6e622176328" dependencies = [ "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -876,9 +893,9 @@ dependencies = [ [[package]] name = "divan" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c2c28ae7ffaca46791b755c7ef2be7b6feb8daa7800f9ec2c73bcc7488320d1" +checksum = "a0d567df2c9c2870a43f3f2bd65aaeb18dbce1c18f217c3e564b4fbaeb3ee56c" dependencies = [ "cfg-if", "clap", @@ -890,13 +907,13 @@ dependencies = [ [[package]] name = "divan-macros" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffa2173b504e1dd6860b8f28ce2b7e0f6f7dd5a0df224779d5874b856f922d2b" +checksum = "27540baf49be0d484d8f0130d7d8da3011c32a44d4fc873368154f1510e574a2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -905,24 +922,12 @@ version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f0042ff8246a363dbe77d2ceedb073339e85a804b9a47636c6e016a9a32c05f" dependencies = [ - "enum-ordinalize 3.1.15", + "enum-ordinalize", "proc-macro2", "quote", "syn 1.0.109", ] -[[package]] -name = "educe" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4bd92664bf78c4d3dba9b7cdafce6fa15b13ed3ed16175218196942e99168a8" -dependencies = [ - "enum-ordinalize 4.3.0", - "proc-macro2", - "quote", - "syn 2.0.48", -] - [[package]] name = "either" version = "1.10.0" @@ -948,27 +953,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.48", -] - -[[package]] -name = "enum-ordinalize" -version = "4.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" -dependencies = [ - "enum-ordinalize-derive", -] - -[[package]] -name = "enum-ordinalize-derive" -version = "4.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -987,35 +972,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "ext-trait" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d772df1c1a777963712fb68e014235e80863d6a91a85c4e06ba2d16243a310e5" -dependencies = [ - "ext-trait-proc_macros", -] - -[[package]] -name = "ext-trait-proc_macros" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab7934152eaf26aa5aa9f7371408ad5af4c31357073c9e84c3b9d7f11ad639a" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "extension-traits" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a296e5a895621edf9fa8329c83aa1cb69a964643e36cf54d8d7a69b789089537" -dependencies = [ - "ext-trait", -] - [[package]] name = "fake-simd" version = "0.1.2" @@ -1024,9 +980,9 @@ checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" [[package]] name = "fastrand" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" [[package]] name = "ffi-opaque" @@ -1125,7 +1081,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -1194,7 +1150,7 @@ checksum = "13a1bcfb855c1f340d5913ab542e36f25a1c56f57de79022928297632435dec2" dependencies = [ "attribute-derive", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -1210,11 +1166,11 @@ dependencies = [ [[package]] name = "ghash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ - "opaque-debug 0.3.0", + "opaque-debug 0.3.1", "polyval", ] @@ -1226,9 +1182,9 @@ checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -1236,7 +1192,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.2.3", + "indexmap 2.2.6", "slab", "tokio", "tokio-util", @@ -1245,9 +1201,9 @@ dependencies = [ [[package]] name = "half" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872" +checksum = "b5eceaaeec696539ddaf7b333340f1af35a5aa87ae3e4f3ead0532f72affab2e" dependencies = [ "cfg-if", "crunchy", @@ -1288,11 +1244,17 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hermit-abi" -version = "0.3.6" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -1311,9 +1273,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -1420,9 +1382,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -1430,9 +1392,9 @@ dependencies = [ [[package]] name = "indoc" -version = "2.0.4" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e186cfbae8084e513daff4240b4797e342f988cecda4fb6c939150f96315fd8" +checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" [[package]] name = "inout" @@ -1489,15 +1451,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "js-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -1517,31 +1479,6 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -[[package]] -name = "lending-iterator" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc07588c853b50689205fb5c00498aa681d89828e0ce8cbd965ebc7a5d8ae260" -dependencies = [ - "extension-traits", - "lending-iterator-proc_macros", - "macro_rules_attribute", - "never-say-never", - "nougat", - "polonius-the-crab", -] - -[[package]] -name = "lending-iterator-proc_macros" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5445dd1c0deb1e97b8a16561d17fc686ca83e8411128fb036e9668a72d51b1d" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "leveldb-sys" version = "2.0.9" @@ -1568,13 +1505,12 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libredox" -version = "0.0.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +checksum = 
"c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "libc", - "redox_syscall", ] [[package]] @@ -1595,25 +1531,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" - -[[package]] -name = "macro_rules_attribute" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf0c9b980bf4f3a37fd7b1c066941dd1b1d0152ce6ee6e8fe8c49b9f6810d862" -dependencies = [ - "macro_rules_attribute-proc_macro", - "paste", -] - -[[package]] -name = "macro_rules_attribute-proc_macro" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58093314a45e00c77d5c508f76e77c3396afbbc0d01506e7fae47b018bac2b1d" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "matchers" @@ -1642,9 +1562,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "memmap2" @@ -1678,9 +1598,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "log", @@ -1710,8 +1630,11 @@ dependencies = [ "aes-gcm", "anyhow", "arbitrary", + "async-stream", + "async-trait", "bech32", "bincode", + "blake3", "bytes", "bytesize", "cargo-husky", @@ -1721,10 +1644,12 @@ dependencies = [ "console-subscriber", "crossterm", "directories", + "divan", "field_count", "futures", "get-size", "itertools 0.11.0", + "leveldb-sys", "memmap2", "num-bigint", "num-rational", @@ -1736,6 +1661,7 @@ dependencies = [ "rand", "ratatui", "regex", + "rs-leveldb", "semver", "serde", "serde_derive", @@ -1746,7 +1672,7 @@ dependencies = [ "test-strategy", "tiny-bip39", "tokio", - "tokio-serde 0.9.0", + "tokio-serde", "tokio-test", "tokio-util", "tracing", @@ -1756,12 +1682,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "never-say-never" -version = "6.6.666" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf5a574dadd7941adeaa71823ecba5e28331b8313fb2e1c6a5c7e5981ea53ad6" - [[package]] name = "nom" version = "7.1.3" @@ -1772,27 +1692,6 @@ dependencies = [ "minimal-lexical", ] -[[package]] -name = "nougat" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b57b9ced431322f054fc673f1d3c7fa52d80efd9df74ad2fc759f044742510" -dependencies = [ - "macro_rules_attribute", - "nougat-proc_macros", -] - -[[package]] -name = "nougat-proc_macros" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84f77a45e99a2f9b492695d99e1c23844619caa5f3e57647cffacad773ca257" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -1925,9 +1824,9 @@ checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" [[package]] name = "opaque-debug" -version = "0.3.0" +version = "0.3.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "opentelemetry" @@ -2058,7 +1957,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -2072,29 +1971,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -2130,21 +2029,15 @@ dependencies = [ "plotters-backend", ] -[[package]] -name = "polonius-the-crab" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2a69ee997a6282f8462abf1e0d8c38c965e968799e912b3bed8c9e8a28c2f9f" - [[package]] name = "polyval" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", "cpufeatures", - "opaque-debug 0.3.0", + "opaque-debug 0.3.1", "universal-hash", ] @@ -2207,9 +2100,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" dependencies = [ "unicode-ident", ] @@ -2222,13 +2115,13 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.2", + "bitflags 2.5.0", "lazy_static", "num-traits", "rand", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", "rusty-fork", "tempfile", "unarray", @@ -2246,9 +2139,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a" +checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922" dependencies = [ "bytes", "prost-derive", @@ -2256,22 +2149,22 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" +checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48" 
dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] name = "prost-types" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e" +checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" dependencies = [ "prost", ] @@ -2299,7 +2192,7 @@ checksum = "a7b5abe3fe82fdeeb93f44d66a7b444dedf2e4827defb0a8e69c437b2de2ef94" dependencies = [ "quote", "quote-use-macros", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -2311,7 +2204,7 @@ dependencies = [ "derive-where", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -2369,7 +2262,7 @@ version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e2e4cd95294a85c3b4446e63ef054eea43e0205b1fd60120c16b74ff7ff96ad" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cassowary", "crossterm", "indoc", @@ -2388,9 +2281,9 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" [[package]] name = "rayon" -version = "1.8.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -2417,9 +2310,9 @@ dependencies = [ [[package]] name = "redox_users" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ "getrandom", "libredox", @@ -2428,14 +2321,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.3" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", - "regex-syntax 0.8.2", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -2449,13 +2342,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", ] [[package]] @@ -2472,9 +2365,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "rs-leveldb" @@ -2500,11 +2393,11 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = 
"65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys", @@ -2531,9 +2424,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "same-file" @@ -2552,15 +2445,15 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "semver" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" [[package]] name = "serde" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] @@ -2576,20 +2469,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" dependencies = [ "itoa", "ryu", @@ -2675,18 +2568,18 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2697,9 +2590,9 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "strsim" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "structmeta" @@ -2710,7 +2603,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -2721,7 +2614,7 @@ checksum = "a60bcaff7397072dca0017d1db428e30d5002e00b6847703e2e42005c95fbe00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -2735,11 +2628,11 @@ dependencies = [ [[package]] name = "strum" -version = "0.26.1" +version = "0.26.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "723b93e8addf9aa965ebe2d11da6d7540fa2283fcea14b3371ff055f7ba13f5f" +checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" dependencies = [ - "strum_macros 0.26.1", + "strum_macros 0.26.2", ] [[package]] @@ -2748,24 +2641,24 @@ version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "rustversion", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] name = "strum_macros" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a3417fc93d76740d974a01654a09777cb500428cc874ca9f45edfe0c4d4cd18" +checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "rustversion", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -2787,9 +2680,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.48" +version = "2.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" dependencies = [ "proc-macro2", "quote", @@ -2820,7 +2713,7 @@ dependencies = [ "tarpc-plugins", "thiserror", "tokio", - "tokio-serde 0.8.0", + "tokio-serde", "tokio-util", "tracing", "tracing-opentelemetry", @@ -2840,12 +2733,15 @@ dependencies = [ [[package]] name = "tasm-lib" version = "0.2.1" -source = "git+https://github.com/TritonVM/tasm-lib.git?rev=928b1fa7522aacdb055c4c04d144b44af241f2e6#928b1fa7522aacdb055c4c04d144b44af241f2e6" +source = "git+https://github.com/dan-da/tasm-lib.git?rev=15a708cf6fcbd9ed3e65e5e2067be6e622176328#15a708cf6fcbd9ed3e65e5e2067be6e622176328" dependencies = [ "anyhow", + "arbitrary", + "const_format", "derive_tasm_object", "hex", - "itertools 0.10.5", + "itertools 0.12.1", + "ndarray", "num", "num-traits", "rand", @@ -2857,9 +2753,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand", @@ -2886,34 +2782,34 @@ dependencies = [ "proc-macro2", "quote", "structmeta", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = 
"8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if", "once_cell", @@ -2996,9 +2892,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.36.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -3032,7 +2928,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -3040,25 +2936,10 @@ name = "tokio-serde" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "911a61637386b789af998ee23f50aa30d5fd7edcec8d6d3dedae5e5815205466" -dependencies = [ - "bytes", - "educe 0.4.23", - "futures-core", - "futures-sink", - "pin-project", - "serde", - "serde_json", -] - -[[package]] -name = "tokio-serde" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf600e7036b17782571dd44fa0a5cea3c82f60db5137f774a325a76a0d6852b" dependencies = [ "bincode", "bytes", - "educe 0.5.11", + "educe", "futures-core", "futures-sink", "pin-project", @@ -3068,9 +2949,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", "pin-project-lite", @@ -3079,9 +2960,9 @@ dependencies = [ [[package]] name = "tokio-test" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b3cbabd3ae862100094ae433e1def582cf86451b4e9bf83aa7ac1d8a7d719" +checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7" dependencies = [ "async-stream", "bytes", @@ -3184,7 +3065,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -3265,12 +3146,11 @@ dependencies = [ [[package]] name = "triton-vm" -version = "0.37.0" +version = "0.38.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b175b3fec57c819f8242fe9a2b9f706bc75c0390ccdf3f88c9f30f6c49bb0ff" +checksum = "85fc0cd3dc6c5a78654584abe02c01d6c9fc7e6a4ad6346f595b434ef36f4ca2" dependencies = [ "arbitrary", - "bincode", "colored", "criterion", "get-size", @@ -3286,7 +3166,7 @@ dependencies = [ "rayon", "serde", "serde_derive", - "strum 0.26.1", + "strum 0.26.2", "thiserror", "twenty-first", "unicode-width", @@ -3300,31 +3180,26 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "twenty-first" -version = "0.37.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74c2f4a50a2fc1a25d26aed9079e6295093ca8fe649f0f00391a9c2cbb03540d" +version = "0.38.0" +source = "git+https://github.com/Neptune-Crypto/twenty-first.git?rev=81573735e9df4836ca16655fd31884271984bac7#81573735e9df4836ca16655fd31884271984bac7" dependencies = [ "arbitrary", "bfieldcodec_derive", "bincode", "blake3", "colored", - "divan", "emojihash-rs", "get-size", "hashbrown 0.14.3", "itertools 0.12.1", "keccak", "lazy_static", 
- "lending-iterator", - "leveldb-sys", "num-bigint", "num-traits", "phf", "rand", "rand_distr", "rayon", - "rs-leveldb", "serde", "serde-big-array", "serde_derive", @@ -3352,9 +3227,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] @@ -3371,6 +3246,12 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +[[package]] +name = "unicode-xid" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" + [[package]] name = "universal-hash" version = "0.5.1" @@ -3410,9 +3291,9 @@ dependencies = [ [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -3435,9 +3316,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3445,24 +3326,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3470,28 +3351,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "web-sys" -version = "0.3.68" +version = "0.3.69" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -3534,7 +3415,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -3552,7 +3433,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -3572,17 +3453,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", ] [[package]] @@ -3593,9 +3474,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" [[package]] name = "windows_aarch64_msvc" @@ -3605,9 +3486,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" [[package]] name = "windows_i686_gnu" @@ -3617,9 +3498,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" [[package]] name = "windows_i686_msvc" @@ -3629,9 +3510,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" [[package]] name = "windows_x86_64_gnu" @@ -3641,9 +3522,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = 
"0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" [[package]] name = "windows_x86_64_gnullvm" @@ -3653,9 +3534,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" [[package]] name = "windows_x86_64_msvc" @@ -3665,9 +3546,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" [[package]] name = "zerocopy" @@ -3686,7 +3567,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -3706,5 +3587,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] diff --git a/Cargo.toml b/Cargo.toml index 279be72b..62d615ac 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,60 +7,71 @@ default-run = "neptune-core" publish = false [dependencies] -aead = "0" -aes-gcm = "0" -anyhow = "1" -arbitrary = { version = "1", features = ["derive"] } -bech32 = "0" -bincode = "1" -bytes = "1" -bytesize = "1" -chrono = "^0.4.31" -clap = { version = "4", features = ["derive"] } -clap_complete = "4.4.6" -console-subscriber = "0.2.0" -crossterm = "0" -directories = "5" -field_count = "0.1.1" -futures = "0" -get-size = { version = "0", features = ["derive"] } +aead = "0.5" +aes-gcm = "0.10" +anyhow = "1.0" +arbitrary = { version = "1.3", features = ["derive"] } +bech32 = "0.9" +bincode = "1.3" +bytes = "1.6" +bytesize = "1.3" +chrono = "=0.4.34" +clap = { version = "4.5", features = ["derive"] } +clap_complete = "4.4" +console-subscriber = "0.2" +crossterm = "0.27" +directories = "5.0" +field_count = "0.1" +futures = "0.3" +get-size = { version = "0.1", features = ["derive"] } itertools = "0.11" memmap2 = "0.9" -num-bigint = { version = "0", features = ["serde"] } -num-rational = "0" -num-traits = "0" -priority-queue = "1" +num-bigint = { version = "0.4", features = ["serde"] } +num-rational = "0.4" +num-traits = "0.2" +priority-queue = "1.4" proptest = "1.4" proptest-arbitrary-interop = "0.1" rand = "0.8" ratatui = "0.23" regex = "1.10.3" semver = "^1.0.21" -serde = { version = "1", features = ["derive"] } -serde_derive = "1" -serde_json = "1" +serde = { version = "1.0", features = ["derive"] } +serde_derive = "1.0" +serde_json = "1.0" strum = { version = "0.25", features = ["derive"] } -tarpc = { version = "^0.34", features = ["tokio1", "serde-transport", "serde-transport-json", "tcp"] } -tasm-lib = "0.2.1" -tiny-bip39 = "1.0.0" -tokio = { version = "1", features = ["full", "tracing"] } -tokio-serde = { version = "0", features = ["bincode", "json"] } -tokio-util = { version = "0", features = ["codec"] } -tracing = "0" -tracing-subscriber = { version = "0", 
features = [
+tarpc = { version = "^0.34", features = [
+    "tokio1",
+    "serde-transport",
+    "serde-transport-json",
+    "tcp",
+] }
+tasm-lib = "0.2"
+tiny-bip39 = "1.0"
+tokio = { version = "1.37", features = ["full", "tracing"] }
+tokio-serde = { version = "0.8", features = ["bincode", "json"] }
+tokio-util = { version = "0.7", features = ["codec"] }
+tracing = "0.1"
+tracing-subscriber = { version = "0.3", features = [
     "std",
     "env-filter",
     "time",
     "fmt",
 ] }
-tracing-test = "0"
-unicode-width = "0"
+tracing-test = "0.2"
+unicode-width = "0.1"
 zeroize = "1.7.0"
+rs-leveldb = "0.1.5"
+leveldb-sys = "2.0.9"
+async-trait = "0.1.77"
+async-stream = "0.3.5"
 
 [dev-dependencies]
 test-strategy = "0.3"
 pin-project-lite = "0.2.13"
-tokio-test = "0"
+tokio-test = "0.4"
+blake3 = "1.5.1"
+divan = "0.1.14"
 
 [dev-dependencies.cargo-husky]
 default-features = false
@@ -90,8 +101,28 @@ opt-level = 0
 # codegen-units = 256
 # rpath = false
 
+## We use harness = false on these benches so that the divan reports are output on stdout.
+
+[[bench]]
+name = "sync_atomic"
+harness = false
+
+[[bench]]
+name = "db_leveldb"
+harness = false
+
+[[bench]]
+name = "db_dbtvec"
+harness = false
+
+[[bench]]
+name = "archival_mmr"
+harness = false
+
+
 [patch.crates-io]
-# # rev = "f711ae27d1402d733989624bbadd59b7e82a1972" is tip of tasm-lib master as of 2024-01-25
-# tasm-lib = { git = "https://github.com/TritonVM/tasm-lib.git", rev = "f711ae27d1402d733989624bbadd59b7e82a1972" }
-# rev = "928b1fa7522aacdb055c4c04d144b44af241f2e6" is tip of tasm-lib master as of 2024-02-27
-tasm-lib = { git = "https://github.com/TritonVM/tasm-lib.git", rev = "928b1fa7522aacdb055c4c04d144b44af241f2e6" }
\ No newline at end of file
+# 15a708cf6fcbd9ed3e65e5e2067be6e622176328 is tip of branch: make_storage_async as of 2024-03-18
+tasm-lib = { git = "https://github.com/dan-da/tasm-lib.git", rev = "15a708cf6fcbd9ed3e65e5e2067be6e622176328" }
+
+# 81573735e9df4836ca16655fd31884271984bac7 is tip of branch: make_storage_async as of 2024-03-18
+twenty-first = { git = "https://github.com/Neptune-Crypto/twenty-first.git", rev = "81573735e9df4836ca16655fd31884271984bac7" }
diff --git a/Makefile b/Makefile
index cec767ff..23972208 100644
--- a/Makefile
+++ b/Makefile
@@ -56,7 +56,7 @@ run:
 test: export RUST_BACKTRACE = 1
 test:
 	$(info RUSTFLAGS is $(RUSTFLAGS))
-	cargo test -- --test-threads=1
+	cargo nextest r
 
 bench:
 	$(info RUSTFLAGS is $(RUSTFLAGS))
diff --git a/benches/archival_mmr.rs b/benches/archival_mmr.rs
new file mode 100644
index 00000000..9efc2059
--- /dev/null
+++ b/benches/archival_mmr.rs
@@ -0,0 +1,246 @@
+use divan::Bencher;
+use leveldb::options::Options;
+use leveldb::options::ReadOptions;
+use leveldb::options::WriteOptions;
+use leveldb_sys::Compression;
+use neptune_core::database::storage::storage_schema::traits::*;
+use neptune_core::database::storage::storage_schema::DbtVec;
+use neptune_core::database::storage::storage_schema::SimpleRustyStorage;
+use neptune_core::database::storage::storage_vec::traits::StorageVecBase;
+use neptune_core::database::NeptuneLevelDb;
+use neptune_core::util_types::mutator_set::archival_mmr::ArchivalMmr;
+use rand::random;
+use tasm_lib::twenty_first::shared_math::tip5::Tip5;
+use tasm_lib::twenty_first::util_types::mmr::shared_advanced::leaf_count_to_node_count;
+use tasm_lib::Digest;
+
+fn main() {
+    divan::main();
+}
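+
+// Note: `harness = false` for this bench target (see Cargo.toml) lets divan
+// print its report directly to stdout; run with `cargo bench --bench archival_mmr`.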
+
+/// These settings affect DB performance and correctness.
+///
+/// Adjust and re-run the benchmarks to see effects.
+///
+/// Rust docs: (basic)
+/// https://docs.rs/rs-leveldb/0.1.5/leveldb/database/options/struct.Options.html
+///
+/// C++ docs: (complete)
+/// https://github.com/google/leveldb/blob/068d5ee1a3ac40dabd00d211d5013af44be55bea/include/leveldb/options.h
+fn db_options() -> Option<Options> {
+    Some(Options {
+        // default: false
+        create_if_missing: true,
+
+        // default: false
+        error_if_exists: true,
+
+        // default: false
+        paranoid_checks: false,
+
+        // default: None --> (4 * 1024 * 1024)
+        write_buffer_size: None,
+
+        // default: None --> 1000
+        max_open_files: None,
+
+        // default: None --> 4 * 1024
+        block_size: None,
+
+        // default: None --> 16
+        block_restart_interval: None,
+
+        // default: Compression::No
+        // or: Compression::Snappy
+        compression: Compression::No,
+
+        // default: None --> 8MB
+        cache: None,
+        // cache: Some(Cache::new(1024)),
+        // note: tests put 128 bytes in each entry.
+        // 100 entries = 12,800 bytes.
+        // So Cache of 1024 bytes is 8% of total data set.
+        // that seems reasonably realistic to get some
+        // hits/misses.
+    })
+}
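+
+/// Opens a fresh test database with the options above and builds an archival
+/// MMR, backed by a `DbtVec<Digest>`, holding `leaf_count` leaves.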
+async fn new_ammr(leaf_count: u64) -> (SimpleRustyStorage, ArchivalMmr<Tip5, DbtVec<Digest>>) {
+    let db = NeptuneLevelDb::open_new_test_database(
+        false,
+        db_options(),
+        Some(ReadOptions {
+            verify_checksums: false,
+            fill_cache: false,
+        }),
+        Some(WriteOptions { sync: true }),
+    )
+    .await
+    .unwrap();
+    let mut rusty_storage = SimpleRustyStorage::new(db);
+    let mut nv = rusty_storage
+        .schema
+        .new_vec::<Digest>("test-archival-mmr")
+        .await;
+
+    // Add the dummy node since nodes are 1-indexed in AMMRs.
+    nv.push(Digest::default()).await;
+
+    let num_nodes = leaf_count_to_node_count(leaf_count);
+    for _ in 0..num_nodes {
+        nv.push(random()).await;
+    }
+
+    (rusty_storage, ArchivalMmr::new(nv).await)
+}
+
+mod append {
+    use super::*;
+
+    mod append_5000 {
+        const NUM_WRITE_ITEMS: usize = 5000;
+        const INIT_AMMR_LEAF_COUNT: u64 = 0;
+        use tasm_lib::twenty_first::shared_math::other::random_elements;
+
+        use super::*;
+
+        fn append_impl(bencher: Bencher, persist: bool) {
+            let rt = tokio::runtime::Runtime::new().unwrap();
+            let (mut storage, mut ammr) = rt.block_on(new_ammr(INIT_AMMR_LEAF_COUNT));
+            let digests = random_elements(NUM_WRITE_ITEMS);
+
+            bencher.bench_local(|| {
+                rt.block_on(async {
+                    for new_leaf in digests.iter() {
+                        ammr.append(*new_leaf).await;
+                    }
+                    if persist {
+                        storage.persist().await;
+                    }
+                });
+            });
+        }
+
+        #[divan::bench]
+        fn append(bencher: Bencher) {
+            append_impl(bencher, false);
+        }
+
+        #[divan::bench]
+        fn append_and_persist(bencher: Bencher) {
+            append_impl(bencher, true);
+        }
+    }
+}
+
+mod mutate {
+    use super::*;
+
+    mod mutate_100_of_10000 {
+        const NUM_MUTATIONS: usize = 100;
+        const AMMR_LEAF_COUNT: u64 = 10000;
+        use itertools::Itertools;
+        use rand::{thread_rng, Rng};
+        use tasm_lib::twenty_first::shared_math::other::random_elements;
+
+        use super::*;
+
+        fn leaf_mutation_impl(bencher: Bencher, persist: bool) {
+            let rt = tokio::runtime::Runtime::new().unwrap();
+            let (mut storage, mut ammr) = rt.block_on(new_ammr(AMMR_LEAF_COUNT));
+            let mut rng = thread_rng();
+            let digests = random_elements(NUM_MUTATIONS);
+            let leaf_index_of_mutated_leafs = (0..NUM_MUTATIONS as u64)
+                .map(|_| rng.gen_range(0..AMMR_LEAF_COUNT))
+                .collect_vec();
+
+            bencher.bench_local(|| {
+                rt.block_on(async {
+                    for (new_leaf, leaf_index) in
+                        digests.iter().zip(leaf_index_of_mutated_leafs.iter())
+                    {
+                        ammr.mutate_leaf(*leaf_index, *new_leaf).await;
+                    }
+                    if persist {
+                        storage.persist().await;
+                    }
+                });
+            });
+        }
+
+        #[divan::bench]
+        fn leaf_mutation(bencher: Bencher) {
+            leaf_mutation_impl(bencher, false);
+        }
+
+        #[divan::bench]
+        fn leaf_mutation_and_persist(bencher: Bencher) {
+            leaf_mutation_impl(bencher, true);
+        }
+    }
+}
+
+mod batch_mutate_leaf_and_update_mps {
+    use super::*;
+
+    mod mutate_100_of_10000 {
+        const NUM_MUTATIONS_IN_BATCH: usize = 100;
+        const AMMR_LEAF_COUNT: u64 = 10000;
+        use itertools::Itertools;
+        use rand::{thread_rng, Rng};
+        use tasm_lib::twenty_first::shared_math::other::random_elements;
+
+        use super::*;
+
+        fn batch_leaf_mutation_impl(bencher: Bencher, persist: bool) {
+            let rt = tokio::runtime::Runtime::new().unwrap();
+            let (mut storage, mut ammr) = rt.block_on(new_ammr(AMMR_LEAF_COUNT));
+            let mut rng = thread_rng();
+            let new_digests = random_elements(NUM_MUTATIONS_IN_BATCH);
+            let mut leaf_index_of_mutated_leafs = (0..NUM_MUTATIONS_IN_BATCH as u64)
+                .map(|_| rng.gen_range(0..AMMR_LEAF_COUNT))
+                .collect_vec();
+            leaf_index_of_mutated_leafs.sort();
+            leaf_index_of_mutated_leafs.dedup();
+
+            let mutation_data = leaf_index_of_mutated_leafs
+                .into_iter()
+                .zip(new_digests)
+                .collect_vec();
+
+            let mut leaf_indices_for_mps_to_preserve = (0..NUM_MUTATIONS_IN_BATCH as u64)
+                .map(|_| rng.gen_range(0..AMMR_LEAF_COUNT))
+                .collect_vec();
+            leaf_indices_for_mps_to_preserve.sort();
+            leaf_indices_for_mps_to_preserve.dedup();
+
+            let mut mps = leaf_indices_for_mps_to_preserve
+                .iter()
+                .map(|i| rt.block_on(async { ammr.prove_membership_async(*i).await }))
+                .collect_vec();
+
+            bencher.bench_local(|| {
+                rt.block_on(async {
+                    ammr.batch_mutate_leaf_and_update_mps(
+                        &mut mps.iter_mut().collect_vec(),
+                        mutation_data.clone(),
+                    )
+                    .await;
+                    if persist {
+                        storage.persist().await;
+                    }
+                });
+            });
+        }
+
+        #[divan::bench]
+        fn leaf_mutation(bencher: Bencher) {
+            batch_leaf_mutation_impl(bencher, false);
+        }
+
+        #[divan::bench]
+        fn leaf_mutation_and_persist(bencher: Bencher) {
+            batch_leaf_mutation_impl(bencher, true);
+        }
+    }
+}
diff --git a/benches/db_dbtvec.rs b/benches/db_dbtvec.rs
new file mode 100644
index 00000000..726adc9c
--- /dev/null
+++ b/benches/db_dbtvec.rs
@@ -0,0 +1,350 @@
+use divan::Bencher;
+use leveldb::options::{Options, ReadOptions, WriteOptions};
+use leveldb_sys::Compression;
+use neptune_core::database::storage::storage_schema::{traits::*, DbtVec, SimpleRustyStorage};
+use neptune_core::database::storage::storage_vec::traits::*;
+use neptune_core::database::NeptuneLevelDb;
+
+// These database bench tests are made with divan.
+//
+// See:
+//  https://nikolaivazquez.com/blog/divan/
+//  https://docs.rs/divan/0.1.0/divan/attr.bench.html
+//  https://github.com/nvzqz/divan
+//
+// Options for #[bench] attr:
+//  https://docs.rs/divan/0.1.0/divan/attr.bench.html#options
+//
+//  name, crate, consts, types, sample_count, sample_size, threads
+//  counters, min_time, max_time, skip_ext_time, ignore
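+//
+// e.g. an individual bench below could be tuned with (illustrative
+// values only):  #[divan::bench(sample_count = 50, sample_size = 5)]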
+///
+/// Rust docs: (basic)
+/// https://docs.rs/rs-leveldb/0.1.5/leveldb/database/options/struct.Options.html
+///
+/// C++ docs: (complete)
+/// https://github.com/google/leveldb/blob/068d5ee1a3ac40dabd00d211d5013af44be55bea/include/leveldb/options.h
+fn db_options() -> Option<Options> {
+    Some(Options {
+        // default: false
+        create_if_missing: true,
+
+        // default: false
+        error_if_exists: true,
+
+        // default: false
+        paranoid_checks: false,
+
+        // default: None --> (4 * 1024 * 1024)
+        write_buffer_size: None,
+
+        // default: None --> 1000
+        max_open_files: None,
+
+        // default: None --> 4 * 1024
+        block_size: None,
+
+        // default: None --> 16
+        block_restart_interval: None,
+
+        // default: Compression::No
+        //      or: Compression::Snappy
+        compression: Compression::No,
+
+        // default: None --> 8MB
+        cache: None,
+        // cache: Some(Cache::new(1024)),
+        // note: tests put 128 bytes in each entry.
+        //       100 entries = 12,800 bytes.
+        //       So Cache of 1024 bytes is 8% of total data set.
+        //       that seems reasonably realistic to get some
+        //       hits/misses.
+    })
+}
+
+fn value() -> Vec<u8> {
+    (0..127).collect()
+}
+
+async fn create_test_dbtvec() -> (SimpleRustyStorage, DbtVec<Vec<u8>>) {
+    let db = NeptuneLevelDb::open_new_test_database(
+        true,
+        db_options(),
+        Some(ReadOptions {
+            verify_checksums: false,
+            fill_cache: false,
+        }),
+        Some(WriteOptions { sync: true }),
+    )
+    .await
+    .unwrap();
+    let mut storage = SimpleRustyStorage::new(db);
+    let vec = storage.schema.new_vec::<Vec<u8>>("test-vector").await;
+    (storage, vec)
+}
+
+mod write_100_entries {
+    use super::*;
+
+    // note: numbers > 100 make the sync_on_write::put() test really slow.
+    const NUM_WRITE_ITEMS: u64 = 100;
+
+    mod push {
+        use super::*;
+
+        fn push_impl(bencher: Bencher, persist: bool) {
+            let rt = tokio::runtime::Runtime::new().unwrap();
+            let (mut storage, mut vector) = rt.block_on(create_test_dbtvec());
+
+            bencher.bench_local(|| {
+                rt.block_on(async {
+                    for _i in 0..NUM_WRITE_ITEMS {
+                        vector.push(value()).await;
+                    }
+                    if persist {
+                        storage.persist().await;
+                    }
+                });
+            });
+        }
+
+        #[divan::bench]
+        fn push(bencher: Bencher) {
+            push_impl(bencher, false);
+        }
+
+        #[divan::bench]
+        fn push_and_persist(bencher: Bencher) {
+            push_impl(bencher, true);
+        }
+    }
+
+    mod set {
+        use super::*;
+
+        fn set_impl(bencher: Bencher, persist: bool) {
+            let rt = tokio::runtime::Runtime::new().unwrap();
+            let (mut storage, mut vector) = rt.block_on(create_test_dbtvec());
+
+            for _i in 0..NUM_WRITE_ITEMS {
+                rt.block_on(vector.push(value()));
+            }
+
+            bencher.bench_local(|| {
+                rt.block_on(async {
+                    for i in 0..NUM_WRITE_ITEMS {
+                        vector.set(i, value()).await;
+                    }
+
+                    if persist {
+                        storage.persist().await;
+                    }
+                });
+            });
+        }
+
+        #[divan::bench]
+        fn set(bencher: Bencher) {
+            set_impl(bencher, false);
+        }
+
+        #[divan::bench]
+        fn set_and_persist(bencher: Bencher) {
+            set_impl(bencher, true);
+        }
+    }
+
+    mod set_many {
+        use super::*;
+
+        fn set_many_impl(bencher: Bencher, persist: bool) {
+            let rt = tokio::runtime::Runtime::new().unwrap();
+            let (mut storage, mut vector) = rt.block_on(create_test_dbtvec());
+
+            for _ in 0..NUM_WRITE_ITEMS {
+                rt.block_on(vector.push(vec![42]));
+            }
+
+            bencher.bench_local(|| {
+                rt.block_on(async {
+                    let values: Vec<_> = (0..NUM_WRITE_ITEMS).map(|i| (i, value())).collect();
+                    vector.set_many(values).await;
+                    if persist {
+                        storage.persist().await
+                    }
+                });
+            });
+        }
+
+        #[divan::bench]
+        fn set_many(bencher: Bencher) {
+            set_many_impl(bencher, false);
+        }
+
+        #[divan::bench]
+        fn set_many_and_persist(bencher: Bencher) {
set_many_impl(bencher, true); + } + } + + mod pop { + use super::*; + + fn pop_impl(bencher: Bencher, persist: bool) { + let rt = tokio::runtime::Runtime::new().unwrap(); + let (mut storage, mut vector) = rt.block_on(create_test_dbtvec()); + + for _i in 0..NUM_WRITE_ITEMS { + rt.block_on(vector.push(value())); + } + + bencher.bench_local(|| { + rt.block_on(async { + for _i in 0..NUM_WRITE_ITEMS { + vector.pop().await; + } + + if persist { + storage.persist().await; + } + }); + }); + } + + #[divan::bench] + fn pop(bencher: Bencher) { + pop_impl(bencher, false); + } + + #[divan::bench] + fn pop_and_persist(bencher: Bencher) { + pop_impl(bencher, true); + } + } +} + +mod read_100_entries { + use super::*; + + const NUM_READ_ITEMS: u64 = 100; + + fn get_impl(bencher: Bencher, num_each: usize, persisted: bool) { + let rt = tokio::runtime::Runtime::new().unwrap(); + let vector = rt.block_on(async { + let (mut storage, mut vector) = create_test_dbtvec().await; + + for _i in 0..NUM_READ_ITEMS { + vector.push(value()).await; + } + if persisted { + storage.persist().await; + } + vector + }); + + bencher.bench_local(|| { + rt.block_on(async { + for i in 0..NUM_READ_ITEMS { + for _j in 0..num_each { + let _ = vector.get(i).await; + } + } + }); + }); + } + + fn get_many_impl(bencher: Bencher, num_each: usize, persisted: bool) { + let rt = tokio::runtime::Runtime::new().unwrap(); + let vector = rt.block_on(async { + let (mut storage, mut vector) = create_test_dbtvec().await; + + for _i in 0..NUM_READ_ITEMS { + vector.push(value()).await; + } + if persisted { + storage.persist().await; + } + vector + }); + + let indices: Vec = (0..NUM_READ_ITEMS).collect(); + bencher.bench_local(|| { + rt.block_on(async { + for _j in 0..num_each { + let _ = vector.get_many(&indices).await; + } + }); + }); + } + + mod get_each_entry_1_time { + use super::*; + + mod persisted { + use super::*; + + #[divan::bench] + fn get(bencher: Bencher) { + get_impl(bencher, 1, true); + } + + #[divan::bench] + fn get_many(bencher: Bencher) { + get_many_impl(bencher, 1, true); + } + } + + mod unpersisted { + use super::*; + + #[divan::bench] + fn get(bencher: Bencher) { + get_impl(bencher, 1, false); + } + + #[divan::bench] + fn get_many(bencher: Bencher) { + get_many_impl(bencher, 1, false); + } + } + } + + mod get_each_entry_20_times { + use super::*; + + mod persisted { + use super::*; + + #[divan::bench] + fn get(bencher: Bencher) { + get_impl(bencher, 20, true); + } + + #[divan::bench] + fn get_many(bencher: Bencher) { + get_many_impl(bencher, 20, true); + } + } + + mod unpersisted { + use super::*; + + #[divan::bench] + fn get(bencher: Bencher) { + get_impl(bencher, 20, false); + } + + #[divan::bench] + fn get_many(bencher: Bencher) { + get_many_impl(bencher, 20, false); + } + } + } +} diff --git a/benches/db_leveldb.rs b/benches/db_leveldb.rs new file mode 100644 index 00000000..96ead340 --- /dev/null +++ b/benches/db_leveldb.rs @@ -0,0 +1,442 @@ +use divan::Bencher; +use leveldb::batch::WriteBatch; +use leveldb::options::{Options, ReadOptions, WriteOptions}; +use leveldb_sys::Compression; +use neptune_core::database::leveldb::DB; + +// These database bench tests are made with divan. 
+//
+// See:
+//  https://nikolaivazquez.com/blog/divan/
+//  https://docs.rs/divan/0.1.0/divan/attr.bench.html
+//  https://github.com/nvzqz/divan
+//
+// Options for #[bench] attr:
+//  https://docs.rs/divan/0.1.0/divan/attr.bench.html#options
+//
+//  name, crate, consts, types, sample_count, sample_size, threads
+//  counters, min_time, max_time, skip_ext_time, ignore
+
+fn main() {
+    divan::main();
+}
+
+/// These settings affect DB performance and correctness.
+///
+/// Important: the default settings are not optimal,
+/// eg: no read cache.
+///
+/// Adjust and re-run the benchmarks to see effects.
+///
+/// Rust docs: (basic)
+/// https://docs.rs/rs-leveldb/0.1.5/leveldb/database/options/struct.Options.html
+///
+/// C++ docs: (complete)
+/// https://github.com/google/leveldb/blob/068d5ee1a3ac40dabd00d211d5013af44be55bea/include/leveldb/options.h
+fn db_options() -> Option<Options> {
+    Some(Options {
+        // default: false
+        create_if_missing: true,
+
+        // default: false
+        error_if_exists: true,
+
+        // default: false
+        paranoid_checks: false,
+
+        // default: None --> (4 * 1024 * 1024)
+        write_buffer_size: None,
+
+        // default: None --> 1000
+        max_open_files: None,
+
+        // default: None --> 4 * 1024
+        block_size: None,
+
+        // default: None --> 16
+        block_restart_interval: None,
+
+        // default: Compression::No
+        //      or: Compression::Snappy
+        compression: Compression::No,
+
+        // default: None --> 8MB
+        // cache: None,
+        cache: None,
+        // note: tests put 128 bytes in each entry.
+        //       100 entries = 12,800 bytes.
+        //
+        // Warning: WriteBatch.put() tends to crash
+        //          when this value is Some(Cache::new(..))
+        //          instead of None.
+    })
+}
+
+fn read_options(verify_checksums: bool, fill_cache: bool) -> Option<ReadOptions> {
+    Some(ReadOptions {
+        verify_checksums,
+        fill_cache,
+    })
+}
+fn read_options_default() -> Option<ReadOptions> {
+    Some(ReadOptions::new())
+}
+
+fn write_options(sync: bool) -> Option<WriteOptions> {
+    Some(WriteOptions { sync })
+}
+
+fn value() -> Vec<u8> {
+    (0..127).collect()
+}
+
+mod write_100_entries {
+    use super::*;
+
+    // note: numbers > 100 make the sync_on_write::put() test really slow.
+    const NUM_WRITE_ITEMS: u32 = 100;
+
+    mod puts {
+        use super::*;
+
+        fn put(bencher: Bencher, sync: bool) {
+            let mut db = DB::open_new_test_database(
+                true,
+                db_options(),
+                read_options_default(),
+                write_options(sync),
+            )
+            .unwrap();
+
+            bencher.bench_local(|| {
+                for i in 0..NUM_WRITE_ITEMS {
+                    let _ = db.put(&i, &value());
+                }
+            });
+        }
+
+        fn batch_put(bencher: Bencher, sync: bool) {
+            let mut db = DB::open_new_test_database(
+                true,
+                db_options(),
+                read_options_default(),
+                write_options(sync),
+            )
+            .unwrap();
+
+            bencher.bench_local(|| {
+                let wb = WriteBatch::new();
+                for i in 0..NUM_WRITE_ITEMS {
+                    wb.put(&i, &value());
+                }
+                let _ = db.write(&wb, sync);
+            });
+        }
+
+        fn batch_put_write(bencher: Bencher, sync: bool) {
+            let mut db = DB::open_new_test_database(
+                true,
+                db_options(),
+                read_options_default(),
+                write_options(sync),
+            )
+            .unwrap();
+
+            let wb = WriteBatch::new();
+            for i in 0..NUM_WRITE_ITEMS {
+                wb.put(&i, &value());
+            }
+
+            bencher.bench_local(|| {
+                let _ = db.write(&wb, sync);
+            });
+        }
+
+        mod sync_on_write {
+            use super::*;
+
+            #[divan::bench]
+            fn put(bencher: Bencher) {
+                super::put(bencher, true);
+            }
+
+            #[divan::bench]
+            fn batch_put(bencher: Bencher) {
+                super::batch_put(bencher, true);
+            }
+
+            #[divan::bench]
+            fn batch_put_write(bencher: Bencher) {
+                super::batch_put_write(bencher, true);
+            }
+        }
+
+        mod no_sync_on_write {
+            use super::*;
+
+            #[divan::bench]
+            fn put(bencher: Bencher) {
+                super::put(bencher, false);
+            }
+
+            #[divan::bench]
+            fn batch_put(bencher: Bencher) {
+                super::batch_put(bencher, false);
+            }
+
+            #[divan::bench]
+            fn batch_put_write(bencher: Bencher) {
+                super::batch_put_write(bencher, false);
+            }
+        }
+    }
+
+    mod deletes {
+        use super::*;
+
+        fn delete(bencher: Bencher, sync: bool) {
+            let mut db = DB::open_new_test_database(
+                true,
+                db_options(),
+                read_options_default(),
+                write_options(sync),
+            )
+            .unwrap();
+
+            for i in 0..NUM_WRITE_ITEMS {
+                let _ = db.put(&i, &value());
+            }
+
+            bencher.bench_local(|| {
+                for i in 0..NUM_WRITE_ITEMS {
+                    let _ = db.delete(&i);
+                }
+            });
+        }
+
+        fn batch_delete(bencher: Bencher, sync: bool) {
+            let mut db = DB::open_new_test_database(
+                true,
+                db_options(),
+                read_options_default(),
+                write_options(sync),
+            )
+            .unwrap();
+
+            // batch write items, unsync
+            let wb = WriteBatch::new();
+            for i in 0..NUM_WRITE_ITEMS {
+                wb.put(&i, &value());
+            }
+            let _ = db.write(&wb, false);
+
+            // batch delete items, sync
+            let wb_del = WriteBatch::new();
+
+            bencher.bench_local(|| {
+                for i in 0..NUM_WRITE_ITEMS {
+                    wb_del.delete(&i);
+                }
+                let _ = db.write(&wb_del, sync);
+            });
+        }
+
+        fn batch_delete_write(bencher: Bencher, sync: bool) {
+            let mut db = DB::open_new_test_database(
+                true,
+                db_options(),
+                read_options_default(),
+                write_options(sync),
+            )
+            .unwrap();
+
+            // batch write items, unsync
+            let wb = WriteBatch::new();
+            for i in 0..NUM_WRITE_ITEMS {
+                wb.put(&i, &value());
+            }
+            let _ = db.write(&wb, false);
+
+            // batch delete items, sync
+            let wb_del = WriteBatch::new();
+            for i in 0..NUM_WRITE_ITEMS {
+                wb_del.delete(&i);
+            }
+
+            bencher.bench_local(|| {
+                let _ = db.write(&wb_del, sync);
+            });
+        }
+
+        mod sync_on_write {
+            use super::*;
+
+            #[divan::bench]
+            fn delete(bencher: Bencher) {
+                super::delete(bencher, true);
+            }
+
+            #[divan::bench]
+            fn batch_delete(bencher: Bencher) {
+                super::batch_delete(bencher, true);
+            }
+
+            #[divan::bench]
+            fn batch_delete_write(bencher: Bencher) {
+                super::batch_delete_write(bencher, true);
+            }
+        }
+
+        mod no_sync_on_write {
+            use super::*;
+
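+            // Same three benches as `sync_on_write` above, but with
+            // `WriteOptions { sync: false }`, so writes skip the fsync.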
#[divan::bench] + fn delete(bencher: Bencher) { + super::delete(bencher, false); + } + + #[divan::bench] + fn batch_delete(bencher: Bencher) { + super::batch_delete(bencher, false); + } + + #[divan::bench] + fn batch_delete_write(bencher: Bencher) { + super::batch_delete_write(bencher, false); + } + } + } +} + +mod read_100_entries { + use super::*; + + const NUM_READ_ITEMS: u32 = 100; + + mod gets { + use super::*; + + fn get(bencher: Bencher, num_reads: usize, cache: bool, verify_checksum: bool) { + let mut db = DB::open_new_test_database( + true, + db_options(), + read_options(verify_checksum, cache), + write_options(false), + ) + .unwrap(); + + for i in 0..NUM_READ_ITEMS { + let _ = db.put(&i, &value()); + } + + bencher.bench_local(|| { + for i in 0..NUM_READ_ITEMS { + for _j in 0..num_reads { + let _ = db.get(&i); + } + } + }); + } + + mod get_each_entry_1_time { + use super::*; + + mod fill_cache { + use super::*; + + mod verify_checksums { + use super::*; + + #[divan::bench] + fn get(bencher: Bencher) { + super::get(bencher, 1, true, true); + } + } + + mod no_verify_checksums { + use super::*; + + #[divan::bench] + fn get(bencher: Bencher) { + super::get(bencher, 1, true, false); + } + } + } + mod no_fill_cache { + use super::*; + + mod verify_checksums { + use super::*; + + #[divan::bench] + fn get(bencher: Bencher) { + super::get(bencher, 1, false, true); + } + } + + mod no_verify_checksums { + use super::*; + + #[divan::bench] + fn get(bencher: Bencher) { + super::get(bencher, 1, false, false); + } + } + } + } + + mod get_each_entry_20_times { + use super::*; + + mod fill_cache { + use super::*; + + mod verify_checksums { + use super::*; + + #[divan::bench] + fn get(bencher: Bencher) { + super::get(bencher, 20, true, true); + } + } + + mod no_verify_checksums { + use super::*; + + #[divan::bench] + fn get(bencher: Bencher) { + super::get(bencher, 20, true, false); + } + } + } + mod no_fill_cache { + use super::*; + + mod verify_checksums { + use super::*; + + #[divan::bench] + fn get(bencher: Bencher) { + super::get(bencher, 20, false, true); + } + } + + mod no_verify_checksums { + use super::*; + + #[divan::bench] + fn get(bencher: Bencher) { + super::get(bencher, 20, false, false); + } + } + } + } + } +} + +mod storage_schema { + + mod dbtvec {} +} + +mod storage_vec {} diff --git a/benches/sync_atomic.rs b/benches/sync_atomic.rs new file mode 100644 index 00000000..657634a2 --- /dev/null +++ b/benches/sync_atomic.rs @@ -0,0 +1,98 @@ +//! This performs simple tests of acquiring locks +//! for AtomicRw (Arc>) and AtomicMutex (Arc>). +//! +//! Basically it is comparing RwLock vs Mutex, through our wrappers. +//! People say that Mutex is "faster", but is that true and +//! by how much? That's what we attempt to measure. +//! +//! Initial results indicate that Mutex is a little faster +//! but not that much. +//! +//! Note that: +//! 1. `lock` and `lock_guard` denote read-lock acquisitions +//! 2. `lock_mut` and `lock_guard_mut` denote write-lock acquisitions +//! 3. For mutex, only write-lock acquisitions are possible. +//! +//! sync_atomic fastest │ slowest │ median │ mean │ samples │ iters +//! ├─ lock │ │ │ │ │ +//! │ ╰─ rw │ │ │ │ │ +//! │ ╰─ lock_guard 169.1 µs │ 210.6 µs │ 169.1 µs │ 175.3 µs │ 100 │ 100 +//! ╰─ lock_mut │ │ │ │ │ +//! ├─ mutex │ │ │ │ │ +//! │ ╰─ lock_guard_mut 131.8 µs │ 217.9 µs │ 131.8 µs │ 136.5 µs │ 100 │ 100 +//! ╰─ rw │ │ │ │ │ +//! ╰─ lock_guard_mut 131.8 µs │ 153.8 µs │ 131.8 µs │ 132.7 µs │ 100 │ 100 +//! +//! Analysis: +//! 1. 
RwLock and Mutex write-lock acquisitions are basically the same.
+//! 2. RwLock read-lock acquisitions are about 28% slower than write-lock
+//!    acquisitions ((169.1 - 131.8) / 131.8 ≈ 0.28, using the medians above),
+//!    which seems acceptable for most uses.
+
+use divan::Bencher;
+use neptune_core::locks::std::{AtomicMutex, AtomicRw};
+
+fn main() {
+    divan::main();
+}
+
+mod lock {
+    use super::*;
+
+    // number of lock acquisitions performed per benchmark iteration.
+    const NUM_ACQUIRES: u32 = 10000;
+
+    mod rw {
+        use super::*;
+
+        #[divan::bench]
+        fn lock_guard(bencher: Bencher) {
+            let atom = AtomicRw::from(true);
+
+            bencher.bench_local(|| {
+                for _i in 0..NUM_ACQUIRES {
+                    let _g = atom.lock_guard();
+                }
+            });
+        }
+    }
+
+    // There is no mutex mod because mutex does not have
+    // read-only locks.
+}
+
+mod lock_mut {
+    use super::*;
+
+    // number of lock acquisitions performed per benchmark iteration.
+    const NUM_ACQUIRES: u32 = 10000;
+
+    mod rw {
+        use super::*;
+
+        #[divan::bench]
+        fn lock_guard_mut(bencher: Bencher) {
+            let mut atom = AtomicRw::from(true);
+
+            bencher.bench_local(|| {
+                for _i in 0..NUM_ACQUIRES {
+                    let _g = atom.lock_guard_mut();
+                }
+            });
+        }
+    }
+
+    mod mutex {
+        use super::*;
+
+        #[divan::bench]
+        fn lock_guard_mut(bencher: Bencher) {
+            let atom = AtomicMutex::from(true);
+
+            bencher.bench_local(|| {
+                for _i in 0..NUM_ACQUIRES {
+                    let _g = atom.lock_guard();
+                }
+            });
+        }
+    }
+}
diff --git a/src/bin/dashboard_src/overview_screen.rs b/src/bin/dashboard_src/overview_screen.rs
index 326859cf..cb52df41 100644
--- a/src/bin/dashboard_src/overview_screen.rs
+++ b/src/bin/dashboard_src/overview_screen.rs
@@ -93,7 +93,7 @@ impl OverviewData {
             ram_used: Default::default(),
         }
     }
-    pub fn test() -> Self {
+    pub async fn test() -> Self {
         OverviewData {
             available_balance: Some(NeptuneCoins::zero()),
             timelocked_balance: Some(NeptuneCoins::zero()),
diff --git a/src/config_models/data_directory.rs b/src/config_models/data_directory.rs
index a7eb610b..fa20f613 100644
--- a/src/config_models/data_directory.rs
+++ b/src/config_models/data_directory.rs
@@ -58,6 +58,7 @@ impl DataDirectory {
             .read(true)
             .write(true)
             .create(true)
+            .truncate(false)
             .open(file_path)
             .await
             .context("open_ensure_parent_dir_exists")
diff --git a/src/connect_to_peers.rs b/src/connect_to_peers.rs
index cf608e93..afb64a61 100644
--- a/src/connect_to_peers.rs
+++ b/src/connect_to_peers.rs
@@ -483,8 +483,8 @@ mod connect_tests {
     #[tokio::test]
     async fn test_outgoing_connection_succeed() -> Result<()> {
         let network = Network::Alpha;
-        let other_handshake = get_dummy_handshake_data_for_genesis(network);
-        let own_handshake = get_dummy_handshake_data_for_genesis(network);
+        let other_handshake = get_dummy_handshake_data_for_genesis(network).await;
+        let own_handshake = get_dummy_handshake_data_for_genesis(network).await;
         let mock = Builder::new()
             .write(&to_bytes(&PeerMessage::Handshake(Box::new((
                 MAGIC_STRING_REQUEST.to_vec(),
@@ -536,8 +536,8 @@ mod connect_tests {
         ) = get_test_genesis_setup(network, 1).await?;
 
         // Get an address for a peer that's not already connected
-        let (other_handshake, peer_sa) = get_dummy_peer_connection_data_genesis(network, 1);
-        let own_handshake = get_dummy_handshake_data_for_genesis(network);
+        let (other_handshake, peer_sa) = get_dummy_peer_connection_data_genesis(network, 1).await;
+        let own_handshake = get_dummy_handshake_data_for_genesis(network).await;
 
         let mut status = check_if_connection_is_allowed(
             state_lock.clone(),
@@ -677,8 +677,8 @@ mod connect_tests {
         // object will panic, and the `await` operator will evaluate
         //
to Error. let network = Network::Alpha; - let other_handshake = get_dummy_handshake_data_for_genesis(network); - let own_handshake = get_dummy_handshake_data_for_genesis(network); + let other_handshake = get_dummy_handshake_data_for_genesis(network).await; + let own_handshake = get_dummy_handshake_data_for_genesis(network).await; let mock = Builder::new() .read(&to_bytes(&PeerMessage::Handshake(Box::new(( MAGIC_STRING_REQUEST.to_vec(), @@ -718,8 +718,8 @@ mod connect_tests { #[tokio::test] async fn test_incoming_connection_fail_bad_magic_value() -> Result<()> { let network = Network::Alpha; - let other_handshake = get_dummy_handshake_data_for_genesis(network); - let own_handshake = get_dummy_handshake_data_for_genesis(network); + let other_handshake = get_dummy_handshake_data_for_genesis(network).await; + let own_handshake = get_dummy_handshake_data_for_genesis(network).await; let mock = Builder::new() .read(&to_bytes(&PeerMessage::Handshake(Box::new(( MAGIC_STRING_RESPONSE.to_vec(), @@ -747,8 +747,8 @@ mod connect_tests { #[traced_test] #[tokio::test] async fn test_incoming_connection_fail_bad_network() -> Result<()> { - let other_handshake = get_dummy_handshake_data_for_genesis(Network::Testnet); - let own_handshake = get_dummy_handshake_data_for_genesis(Network::Alpha); + let other_handshake = get_dummy_handshake_data_for_genesis(Network::Testnet).await; + let own_handshake = get_dummy_handshake_data_for_genesis(Network::Alpha).await; let mock = Builder::new() .read(&to_bytes(&PeerMessage::Handshake(Box::new(( MAGIC_STRING_REQUEST.to_vec(), @@ -780,7 +780,7 @@ mod connect_tests { #[traced_test] #[tokio::test] async fn test_incoming_connection_fail_bad_version() { - let mut other_handshake = get_dummy_handshake_data_for_genesis(Network::Testnet); + let mut other_handshake = get_dummy_handshake_data_for_genesis(Network::Testnet).await; let (_peer_broadcast_tx, from_main_rx_clone, to_main_tx, _to_main_rx1, state_lock, _hsd) = get_test_genesis_setup(Network::Alpha, 0).await.unwrap(); let state = state_lock.lock_guard().await; @@ -843,8 +843,8 @@ mod connect_tests { // In this scenario a node attempts to make an ingoing connection but the max // peer count should prevent a new incoming connection from being accepted. let network = Network::Alpha; - let other_handshake = get_dummy_handshake_data_for_genesis(network); - let own_handshake = get_dummy_handshake_data_for_genesis(network); + let other_handshake = get_dummy_handshake_data_for_genesis(network).await; + let own_handshake = get_dummy_handshake_data_for_genesis(network).await; let mock = Builder::new() .read(&to_bytes(&PeerMessage::Handshake(Box::new(( MAGIC_STRING_REQUEST.to_vec(), @@ -873,7 +873,7 @@ mod connect_tests { cli.max_peers = 2; state_lock.set_cli(cli).await; - let (_, _, _latest_block_header) = get_dummy_latest_block(None); + let (_, _, _latest_block_header) = get_dummy_latest_block(None).await; let answer = answer_peer( mock, state_lock.clone(), @@ -894,8 +894,8 @@ mod connect_tests { // In this scenario a peer has been banned, and is attempting to make an ingoing // connection. This should not be possible. 
         let network = Network::Alpha;
-        let other_handshake = get_dummy_handshake_data_for_genesis(network);
-        let own_handshake = get_dummy_handshake_data_for_genesis(network);
+        let other_handshake = get_dummy_handshake_data_for_genesis(network).await;
+        let own_handshake = get_dummy_handshake_data_for_genesis(network).await;
         let mock = Builder::new()
             .read(&to_bytes(&PeerMessage::Handshake(Box::new((
                 MAGIC_STRING_REQUEST.to_vec(),
diff --git a/src/database/leveldb.rs b/src/database/leveldb.rs
new file mode 100644
index 00000000..08a42263
--- /dev/null
+++ b/src/database/leveldb.rs
@@ -0,0 +1,601 @@
+//! [`DB`] wraps [`Database`] and provides
+//! functionality for reading the database on-disk path
+//! as well as destroying the on-disk database manually
+//! or automatically upon drop.
+//!
+//! auto-destroy-on-drop is needed for unit tests that use the DB.
+
+use leveldb::{
+    batch::{Batch, WriteBatch},
+    compaction::Compaction,
+    database::comparator::Comparator,
+    database::Database,
+    error::Error as DbError,
+    iterator::{Iterable, Iterator, KeyIterator, ValueIterator},
+    key::IntoLevelDBKey,
+    options::{Options, ReadOptions, WriteOptions},
+    snapshots::{Snapshot, Snapshots},
+};
+use rand::distributions::Alphanumeric;
+use rand::distributions::DistString;
+use std::path::Path;
+use std::sync::Arc;
+
+/// `DbIntMut` provides thread-safe access to LevelDB API with `&self` setters
+///
+/// Interior mutability is available without rust locks because the underlying
+/// C++ levelDB API is internally thread-safe.
+///
+/// If `&self` setters are not needed, prefer [`DB`] instead.
+//
+// This also provides an abstraction layer which enables
+// us to provide an API that is somewhat backwards compatible
+// with rusty-leveldb.  For example, our get() and put()
+// do not require ReadOptions and WriteOptions param.
+//
+// Do not add any public (mutable) fields to this struct.
+#[derive(Debug, Clone)]
+pub struct DbIntMut {
+    // note: these must be private and unchanged after creation.
+
+    // This Option is needed for the Drop impl.  See comments there.
+    // All other methods can call unwrap() because constructors always
+    // set Some(..)
+    db: Option<Arc<Database>>, // Send + Sync.  Arc is so we can derive Clone.
+    path: std::path::PathBuf,
+    destroy_db_on_drop: bool,
+    read_options: ReadOptions,
+    write_options: WriteOptions,
+}
+
+#[allow(dead_code)]
+impl DbIntMut {
+    /// Open a new database
+    ///
+    /// If the database is missing, the behaviour depends on `options.create_if_missing`.
+    /// The database will be created using the settings given in `options`.
+    #[inline]
+    pub fn open(name: &Path, options: &Options) -> Result<Self, DbError> {
+        let db = Database::open(name, options)?;
+        Ok(Self {
+            db: Some(Arc::new(db)),
+            path: name.into(),
+            destroy_db_on_drop: false,
+            read_options: ReadOptions::new(),
+            write_options: WriteOptions::new(),
+        })
+    }
+
+    /// Open a new database
+    ///
+    /// If the database is missing, the behaviour depends on `options.create_if_missing`.
+    /// The database will be created using the settings given in `options`.
+    #[inline]
+    pub fn open_with_options(
+        name: &Path,
+        options: &Options,
+        read_options: ReadOptions,
+        write_options: WriteOptions,
+    ) -> Result<Self, DbError> {
+        let db = Database::open(name, options)?;
+        Ok(Self {
+            db: Some(Arc::new(db)),
+            path: name.into(),
+            destroy_db_on_drop: false,
+            read_options,
+            write_options,
+        })
+    }
+
+    /// Open a new database with a custom comparator
+    ///
+    /// If the database is missing, the behaviour depends on `options.create_if_missing`.
+    /// The database will be created using the settings given in `options`.
+    ///
+    /// The comparator must implement a total ordering over the keyspace.
+    ///
+    /// For keys that implement Ord, consider the `OrdComparator`.
+    #[inline]
+    pub fn open_with_comparator<C: Comparator>(
+        name: &Path,
+        options: &Options,
+        comparator: C,
+    ) -> Result<Self, DbError> {
+        let db = Database::open_with_comparator(name, options, comparator)?;
+        Ok(Self {
+            db: Some(Arc::new(db)),
+            path: name.into(),
+            destroy_db_on_drop: false,
+            read_options: ReadOptions::new(),
+            write_options: WriteOptions::new(),
+        })
+    }
+
+    /// Creates and opens a test database
+    ///
+    /// The database will be created in the system
+    /// temp directory with prefix "test-db-" followed
+    /// by a random string.
+    ///
+    /// if destroy_db_on_drop is true, the database on-disk
+    /// files will be wiped when the DB struct is dropped.
+    pub fn open_new_test_database(
+        destroy_db_on_drop: bool,
+        options: Option<Options>,
+        read_options: Option<ReadOptions>,
+        write_options: Option<WriteOptions>,
+    ) -> Result<Self, DbError> {
+        let path = std::env::temp_dir().join(format!(
+            "test-db-{}",
+            Alphanumeric.sample_string(&mut rand::thread_rng(), 10)
+        ));
+        Self::open_test_database(
+            &path,
+            destroy_db_on_drop,
+            options,
+            read_options,
+            write_options,
+        )
+    }
+
+    /// Opens an existing (test?) database, with auto-destroy option.
+    ///
+    /// if destroy_db_on_drop is true, the database on-disk
+    /// files will be wiped when the DB struct is dropped.
+    /// This is usually useful only for unit-test purposes.
+    pub fn open_test_database(
+        path: &std::path::Path,
+        destroy_db_on_drop: bool,
+        options: Option<Options>,
+        read_options: Option<ReadOptions>,
+        write_options: Option<WriteOptions>,
+    ) -> Result<Self, DbError> {
+        let mut opt = options.unwrap_or_else(Options::new);
+        let read_opt = read_options.unwrap_or_else(ReadOptions::new);
+        let write_opt = write_options.unwrap_or_else(WriteOptions::new);
+
+        opt.create_if_missing = true;
+        opt.error_if_exists = false;
+
+        let mut db = Self::open_with_options(path, &opt, read_opt, write_opt)?;
+        db.destroy_db_on_drop = destroy_db_on_drop;
+        Ok(db)
+    }
+
+    /// Set a key/val in the database
+    #[inline]
+    pub fn put(&self, key: &dyn IntoLevelDBKey, value: &[u8]) -> Result<(), DbError> {
+        self.db
+            .as_ref()
+            .unwrap()
+            .put(&self.write_options, key, value)
+    }
+
+    /// Set a key/val in the database, with key as bytes.
+    #[inline]
+    pub fn put_u8(&self, key: &[u8], value: &[u8]) -> Result<(), DbError> {
+        self.db
+            .as_ref()
+            .unwrap()
+            .put_u8(&self.write_options, key, value)
+    }
+
+    /// Get a value matching key from the database
+    #[inline]
+    pub fn get(&self, key: &dyn IntoLevelDBKey) -> Result<Option<Vec<u8>>, DbError> {
+        self.db.as_ref().unwrap().get(&self.read_options, key)
+    }
+
+    /// Get a value matching key from the database, with key as bytes
+    #[inline]
+    pub fn get_u8(&self, key: &[u8]) -> Result<Option<Vec<u8>>, DbError> {
+        self.db.as_ref().unwrap().get_u8(&self.read_options, key)
+    }
+
+    /// Delete an entry matching key from the database
+    #[inline]
+    pub fn delete(&self, key: &dyn IntoLevelDBKey) -> Result<(), DbError> {
+        self.db.as_ref().unwrap().delete(&self.write_options, key)
+    }
+
+    /// Delete an entry matching key from the database, with key as bytes
+    #[inline]
+    pub fn delete_u8(&self, key: &[u8]) -> Result<(), DbError> {
+        self.db
+            .as_ref()
+            .unwrap()
+            .delete_u8(&self.write_options, key)
+    }
+
+    /// Write the WriteBatch to database atomically
+    ///
+    /// The sync flag forces filesystem sync operation eg fsync
+    /// which will be slower than async writes, which are not
+    /// guaranteed to complete.  See leveldb Docs.
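+    ///
+    /// A usage sketch (the batch construction mirrors the benches added in
+    /// this PR):
+    ///
+    /// ```text
+    /// let wb = WriteBatch::new();
+    /// wb.put(&1u32, &[42u8]);
+    /// wb.delete(&2u32);
+    /// db.write(&wb, true)?; // sync write
+    /// ```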
+ pub fn write(&self, batch: &WriteBatch, sync: bool) -> Result<(), DbError> { + const WO_SYNC: WriteOptions = WriteOptions { sync: true }; + const WO_NOSYNC: WriteOptions = WriteOptions { sync: false }; + + self.db + .as_ref() + .unwrap() + .write(if sync { &WO_SYNC } else { &WO_NOSYNC }, batch) + } + + /// Write [`WriteBatch`] to database atomically + /// + /// Sync behavior will be determined by the WriteOptions + /// supplied at `DB` creation. + pub fn write_auto(&self, batch: &WriteBatch) -> Result<(), DbError> { + self.db.as_ref().unwrap().write(&self.write_options, batch) + } + + /// returns the directory path of the database files on disk. + #[inline] + pub fn path(&self) -> &std::path::PathBuf { + &self.path + } + + /// returns `destroy_db_on_drop` setting + #[inline] + pub fn destroy_db_on_drop(&self) -> bool { + self.destroy_db_on_drop + } + + /// Wipe the database files, if existing. + fn destroy_db(&self) -> Result<(), std::io::Error> { + match self.path.exists() { + true => std::fs::remove_dir_all(&self.path), + false => Ok(()), + } + } +} + +impl Drop for DbIntMut { + #[inline] + fn drop(&mut self) { + if self.destroy_db_on_drop { + { + // note: this block is only needed on windows, though it works + // on other platforms. Windows won't allow deletion of the + // underlying DB file while it remains open. The file doesn't + // get closed until the the `rs_leveldb::DB` is dropped, which calls + // the C API leveldb_close(). + // + // So we must drop the `DB` held by `self`, but to do that we must + // obtain ownership of the `Arc`. + // + // `Self::db` is an `Option` because `Option::take()` allows extracting + // an owned value with an `&mut` reference. Whereas `Cell`, `Refcell` + // require a `self` (for `into_inner()`), but `impl Drop` provides us + // only an `&mut self` reference. + // + // `mem::replace()` is another way to do it, but then we must replace + // the `DB` with another `DB` which also opens a file, so we would + // just create the problem again. + + // get `Arc` out of the `Option`, and replace with `None`. + let db_opt = self.db.take(); + + // now we own the `Arc`, so we can drop it, and `DB` with it. + if let Some(db_arc) = db_opt { + drop(db_arc); + } + } + + // note: we do not panic if the database directory + // cannot be removed. Perhaps revisit later. + let _ = self.destroy_db(); + } + } +} + +impl Batch for DbIntMut { + #[inline] + fn write(&self, options: &WriteOptions, batch: &WriteBatch) -> Result<(), DbError> { + self.db.as_ref().unwrap().write(options, batch) + } +} + +impl<'a> Compaction<'a> for DbIntMut { + #[inline] + fn compact(&self, start: &'a [u8], limit: &'a [u8]) { + self.db.as_ref().unwrap().compact(start, limit) + } +} + +impl<'a> Iterable<'a> for DbIntMut { + #[inline] + fn iter(&'a self, options: &ReadOptions) -> Iterator<'a> { + self.db.as_ref().unwrap().iter(options) + } + + #[inline] + fn keys_iter(&'a self, options: &ReadOptions) -> KeyIterator<'a> { + self.db.as_ref().unwrap().keys_iter(options) + } + + #[inline] + fn value_iter(&'a self, options: &ReadOptions) -> ValueIterator<'a> { + self.db.as_ref().unwrap().value_iter(options) + } +} + +impl Snapshots for DbIntMut { + fn snapshot(&self) -> Snapshot { + self.db.as_ref().unwrap().snapshot() + } +} + +/// `DB` provides thread-safe access to LevelDB API with `&mut self` setters. +/// +/// `DB` is a newtype wrapper for [`DbIntMut`] that hides the interior mutability +/// of the underlying C++ levelDB API, which is internally thread-safe. 
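+///
+/// A minimal usage sketch, mirroring the unit test at the bottom of this file
+/// (the test-database constructor is this crate's own helper):
+///
+/// ```text
+/// let mut db = DB::open_new_test_database(false, None, None, None).unwrap();
+/// let key = "some-key";
+/// db.put(&key, &[42u8]).unwrap();
+/// assert_eq!(db.get(&key).unwrap(), Some(vec![42u8]));
+/// ```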
+/// +/// If interior mutability is needed, use [`DbIntMut`] instead. +// +// This also provides an abstraction layer which enables +// us to provide an API that is somewhat backwards compatible +// with rusty-leveldb. For example, our get() and put() +// do not require ReadOptions and WriteOptions param. +#[derive(Debug, Clone)] +pub struct DB(DbIntMut); + +#[allow(dead_code)] +impl DB { + /// Open a new database + /// + /// If the database is missing, the behaviour depends on `options.create_if_missing`. + /// The database will be created using the settings given in `options`. + #[inline] + pub fn open(name: &Path, options: &Options) -> Result { + Ok(Self(DbIntMut::open(name, options)?)) + } + + /// Open a new database + /// + /// If the database is missing, the behaviour depends on `options.create_if_missing`. + /// The database will be created using the settings given in `options`. + #[inline] + pub fn open_with_options( + name: &Path, + options: &Options, + read_options: ReadOptions, + write_options: WriteOptions, + ) -> Result { + Ok(Self(DbIntMut::open_with_options( + name, + options, + read_options, + write_options, + )?)) + } + + /// Open a new database with a custom comparator + /// + /// If the database is missing, the behaviour depends on `options.create_if_missing`. + /// The database will be created using the settings given in `options`. + /// + /// The comparator must implement a total ordering over the keyspace. + /// + /// For keys that implement Ord, consider the `OrdComparator`. + #[inline] + pub fn open_with_comparator( + name: &Path, + options: &Options, + comparator: C, + ) -> Result { + Ok(Self(DbIntMut::open_with_comparator( + name, options, comparator, + )?)) + } + + /// Creates and opens a test database + /// + /// The database will be created in the system + /// temp directory with prefix "test-db-" followed + /// by a random string. + /// + /// if destroy_db_on_drop is true, the database on-disk + /// files will be wiped when the DB struct is dropped. + pub fn open_new_test_database( + destroy_db_on_drop: bool, + options: Option, + read_options: Option, + write_options: Option, + ) -> Result { + Ok(Self(DbIntMut::open_new_test_database( + destroy_db_on_drop, + options, + read_options, + write_options, + )?)) + } + + /// Opens an existing (test?) database, with auto-destroy option. + /// + /// if destroy_db_on_drop is true, the database on-disk + /// files will be wiped when the DB struct is dropped. + /// This is usually useful only for unit-test purposes. + pub fn open_test_database( + path: &std::path::Path, + destroy_db_on_drop: bool, + options: Option, + read_options: Option, + write_options: Option, + ) -> Result { + Ok(Self(DbIntMut::open_test_database( + path, + destroy_db_on_drop, + options, + read_options, + write_options, + )?)) + } + + /// Set a key/val in the database + #[inline] + pub fn put(&mut self, key: &dyn IntoLevelDBKey, value: &[u8]) -> Result<(), DbError> { + self.0.put(key, value) + } + + /// Set a key/val in the database, with key as bytes. 
+ #[inline] + pub fn put_u8(&mut self, key: &[u8], value: &[u8]) -> Result<(), DbError> { + self.0.put_u8(key, value) + } + + /// Get a value matching key from the database + #[inline] + pub fn get(&self, key: &dyn IntoLevelDBKey) -> Result>, DbError> { + self.0.get(key) + } + + /// Get a value matching key from the database, with key as bytes + #[inline] + pub fn get_u8(&self, key: &[u8]) -> Result>, DbError> { + self.0.get_u8(key) + } + + /// Delete an entry matching key from the database + #[inline] + pub fn delete(&mut self, key: &dyn IntoLevelDBKey) -> Result<(), DbError> { + self.0.delete(key) + } + + /// Delete an entry matching key from the database, with key as bytes + #[inline] + pub fn delete_u8(&mut self, key: &[u8]) -> Result<(), DbError> { + self.0.delete_u8(key) + } + + /// Write the WriteBatch to database atomically + /// + /// The sync flag forces filesystem sync operation eg fsync + /// which will be slower than async writes, which are not + /// guaranteed to complete. See leveldb Docs. + pub fn write(&mut self, batch: &WriteBatch, sync: bool) -> Result<(), DbError> { + self.0.write(batch, sync) + } + + /// Write the WriteBatch to database atomically + /// + /// The sync flag forces filesystem sync operation eg fsync + /// which will be slower than async writes, which are not + /// guaranteed to complete. See leveldb Docs. + pub fn write_batch_iter( + &mut self, + batch: impl IntoIterator, + sync: bool, + ) -> Result<(), DbError> { + let write_batch = WriteBatch::new(); + for (key, value) in batch.into_iter() { + write_batch.put(&key, value); + } + + self.0.write(&write_batch, sync) + } + + /// Write [`WriteBatch`] to database atomically + /// + /// Sync behavior will be determined by the WriteOptions + /// supplied at `DB` creation. + pub fn write_auto(&mut self, batch: &WriteBatch) -> Result<(), DbError> { + self.0.write_auto(batch) + } + + /// Write the WriteBatch to database atomically + /// + /// The sync flag forces filesystem sync operation eg fsync + /// which will be slower than async writes, which are not + /// guaranteed to complete. See leveldb Docs. + pub fn write_batch_iter_auto( + &mut self, + batch: impl IntoIterator, + ) -> Result<(), DbError> { + let write_batch = WriteBatch::new(); + for (key, value) in batch.into_iter() { + write_batch.put(&key, value); + } + + self.0.write_auto(&write_batch) + } + + /// returns the directory path of the database files on disk. + #[inline] + pub fn path(&self) -> &std::path::PathBuf { + self.0.path() + } + + /// returns `destroy_db_on_drop` setting + #[inline] + pub fn destroy_db_on_drop(&self) -> bool { + self.0.destroy_db_on_drop() + } + + /// compacts the database file. should be called periodically. + #[inline] + pub fn compact<'a>(&mut self, start: &'a [u8], limit: &'a [u8]) { + self.0.compact(start, limit) + } + + /// Wipe the database files, if existing. 
+ pub fn destroy_db(&mut self) -> Result<(), std::io::Error> { + self.0.destroy_db() + } +} + +impl<'a> Iterable<'a> for DB { + #[inline] + fn iter(&'a self, options: &ReadOptions) -> Iterator<'a> { + self.0.iter(options) + } + + #[inline] + fn keys_iter(&'a self, options: &ReadOptions) -> KeyIterator<'a> { + self.0.keys_iter(options) + } + + #[inline] + fn value_iter(&'a self, options: &ReadOptions) -> ValueIterator<'a> { + self.0.value_iter(options) + } +} + +impl Snapshots for DB { + fn snapshot(&self) -> Snapshot { + self.0.snapshot() + } +} + +#[cfg(test)] +mod tests { + + use super::*; + + #[test] + fn level_db_close_and_reload() { + // open new test database that will not be destroyed on close. + let mut db = DB::open_new_test_database(false, None, None, None).unwrap(); + let db_path = db.path().clone(); + + let key = "answer-to-everything"; + let val = vec![42]; + + let _ = db.put(&key, &val); + + drop(db); // close the DB. + + assert!(db_path.exists()); + + // open existing database that will be destroyed on close. + let db2 = DbIntMut::open_test_database(&db_path, true, None, None, None).unwrap(); + + let val2 = db2.get(&key).unwrap().unwrap(); + assert_eq!(val, val2); + + drop(db2); // close the DB. db_path dir is auto removed. + + assert!(!db_path.exists()); + } +} diff --git a/src/database/mod.rs b/src/database/mod.rs index fc07bf81..80b74b6d 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,3 +1,5 @@ +pub mod leveldb; mod neptune_leveldb; +pub mod storage; -pub use neptune_leveldb::{create_db_if_missing, NeptuneLevelDb}; +pub use neptune_leveldb::{create_db_if_missing, NeptuneLevelDb, WriteBatchAsync}; diff --git a/src/database/neptune_leveldb.rs b/src/database/neptune_leveldb.rs index 60f5dd4c..0bedf447 100644 --- a/src/database/neptune_leveldb.rs +++ b/src/database/neptune_leveldb.rs @@ -1,17 +1,16 @@ -use crate::prelude::twenty_first; +use super::leveldb::DB; use anyhow::Result; +use leveldb::{ + batch::WriteBatch, + iterator::Iterable, + options::{Options, ReadOptions, WriteOptions}, +}; +use leveldb_sys::Compression; use serde::de::DeserializeOwned; use serde::Serialize; use std::marker::PhantomData; use std::path::Path; use tokio::task; -use twenty_first::leveldb::{ - batch::WriteBatch, - iterator::Iterable, - options::{Options, ReadOptions, WriteOptions}, -}; -use twenty_first::leveldb_sys::Compression; -use twenty_first::storage::level_db::DB; struct NeptuneLevelDbInternal where @@ -23,6 +22,20 @@ where _value: PhantomData, } +impl From for NeptuneLevelDbInternal +where + Key: Serialize + DeserializeOwned, + Value: Serialize + DeserializeOwned, +{ + fn from(database: DB) -> Self { + Self { + database, + _key: Default::default(), + _value: Default::default(), + } + } +} + impl Clone for NeptuneLevelDbInternal where Key: Serialize + DeserializeOwned, @@ -88,18 +101,34 @@ where value_bytes.map(|bytes| bincode::deserialize(&bytes).unwrap()) } + fn get_u8(&mut self, key: &[u8]) -> Option> { + self.database.get_u8(key).unwrap() + } + fn put(&mut self, key: Key, value: Value) { let key_bytes: Vec = bincode::serialize(&key).unwrap(); let value_bytes: Vec = bincode::serialize(&value).unwrap(); self.database.put(&key_bytes, &value_bytes).unwrap(); } - fn batch_write(&mut self, entries: impl IntoIterator) { + fn put_u8(&mut self, key: &[u8], value: &[u8]) { + self.database.put_u8(key, value).unwrap() + } + + fn batch_write(&mut self, entries: WriteBatchAsync) { let batch = WriteBatch::new(); - for (key, value) in entries.into_iter() { - let key_bytes: Vec = 
bincode::serialize(&key).unwrap(); - let value_bytes: Vec = bincode::serialize(&value).unwrap(); - batch.put(&key_bytes, &value_bytes); + for op in entries.0.into_iter() { + match op { + WriteBatchOpAsync::Write(key, value) => { + let key_bytes: Vec = bincode::serialize(&key).unwrap(); + let value_bytes: Vec = bincode::serialize(&value).unwrap(); + batch.put(&key_bytes, &value_bytes); + } + WriteBatchOpAsync::Delete(key) => { + let key_bytes: Vec = bincode::serialize(&key).unwrap(); + batch.delete(&key_bytes); + } + } } self.database.write(&batch, true).unwrap(); @@ -208,6 +237,13 @@ where task::spawn_blocking(move || inner.get(key)).await.unwrap() } + pub async fn get_u8(&self, key: Vec) -> Option> { + let mut inner = self.0.clone(); + task::spawn_blocking(move || inner.get_u8(&key)) + .await + .unwrap() + } + /// Set database value asynchronously pub async fn put(&mut self, key: Key, value: Value) { let mut inner = self.0.clone(); @@ -216,11 +252,15 @@ where .unwrap() } + pub async fn put_u8(&mut self, key: Vec, value: Vec) { + let mut inner = self.0.clone(); + task::spawn_blocking(move || inner.put_u8(&key, &value)) + .await + .unwrap() + } + /// Write database values as a batch asynchronously - pub async fn batch_write( - &mut self, - entries: impl IntoIterator + Send + Sync + 'static, - ) { + pub async fn batch_write(&mut self, entries: WriteBatchAsync) { let mut inner = self.0.clone(); task::spawn_blocking(move || inner.batch_write(entries)) .await @@ -240,6 +280,76 @@ where let mut inner = self.0.clone(); task::spawn_blocking(move || inner.flush()).await.unwrap() } + + /// returns the directory path of the database files on disk. + #[inline] + pub fn path(&self) -> &std::path::PathBuf { + self.0.database.path() + } +} + +impl NeptuneLevelDb +where + Key: Serialize + DeserializeOwned + Send + Sync + 'static, + Value: Serialize + DeserializeOwned + Send + Sync + 'static, +{ + /// Creates and opens a test database + /// + /// The database will be created in the system + /// temp directory with prefix "test-db-" followed + /// by a random string. + /// + /// if destroy_db_on_drop is true, the database on-disk + /// files will be wiped when the DB struct is dropped. + pub async fn open_new_test_database( + destroy_db_on_drop: bool, + options: Option, + read_options: Option, + write_options: Option, + ) -> Result { + let options_async = options.map(OptionsAsync::from); + + let db = task::spawn_blocking(move || { + DB::open_new_test_database( + destroy_db_on_drop, + options_async.map(|o| o.into()), + read_options, + write_options, + ) + }) + .await??; + + Ok(Self(NeptuneLevelDbInternal::from(db))) + } + + /// Opens an existing (test?) database, with auto-destroy option. + /// + /// if destroy_db_on_drop is true, the database on-disk + /// files will be wiped when the DB struct is dropped. + /// This is usually useful only for unit-test purposes. 
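+    /// (Like `open_new_test_database` above, this wraps the blocking LevelDB
+    /// open call in `task::spawn_blocking` so it can be awaited.)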
+ pub async fn open_test_database( + db_path: &std::path::Path, + destroy_db_on_drop: bool, + options: Option, + read_options: Option, + write_options: Option, + ) -> Result { + let path = db_path.to_path_buf(); + let options_async = options.map(OptionsAsync::from); + + let db = task::spawn_blocking(move || { + DB::open_test_database( + &path, + destroy_db_on_drop, + options_async.map(|o| o.into()), + read_options, + write_options, + ) + }) + .await??; + + Ok(Self(NeptuneLevelDbInternal::from(db))) + } } // We made this OptionsAsync struct because leveldb::options::Options cannot be @@ -248,7 +358,7 @@ where // send this OptionsAsync between threads, which does not have a Cache field. // // todo: add a cache_size option specified in bytes. -struct OptionsAsync { +pub(super) struct OptionsAsync { pub create_if_missing: bool, pub error_if_exists: bool, pub paranoid_checks: bool, @@ -302,3 +412,35 @@ impl From for Options { Self::from(&o) } } + +#[derive(Debug, Clone)] +enum WriteBatchOpAsync { + // args: key, val + Write(K, V), + + // args: key + Delete(K), +} + +#[derive(Debug, Clone)] +pub struct WriteBatchAsync(Vec>); + +impl WriteBatchAsync { + pub fn new() -> Self { + Self(vec![]) + } + + pub fn op_write(&mut self, key: K, value: V) { + self.0.push(WriteBatchOpAsync::Write(key, value)); + } + + pub fn op_delete(&mut self, key: K) { + self.0.push(WriteBatchOpAsync::Delete(key)); + } +} + +impl Default for WriteBatchAsync { + fn default() -> Self { + Self::new() + } +} diff --git a/src/database/storage/level_db.rs b/src/database/storage/level_db.rs new file mode 100644 index 00000000..9385d4ee --- /dev/null +++ b/src/database/storage/level_db.rs @@ -0,0 +1,564 @@ +//! [`DB`] wraps [`Database`] and provides +//! functionality for reading the database on-disk path +//! as well as destroying the on-disk database manually +//! or automatically upon drop. +//! +//! auto-destroy-on-drop is needed for unit tests that use the DB. + +use leveldb::{ + batch::{Batch, WriteBatch}, + compaction::Compaction, + database::comparator::Comparator, + database::Database, + error::Error as DbError, + iterator::{Iterable, Iterator, KeyIterator, ValueIterator}, + key::IntoLevelDBKey, + options::{Options, ReadOptions, WriteOptions}, + snapshots::{Snapshot, Snapshots}, +}; +use rand::distributions::DistString; +use rand_distr::Alphanumeric; +use std::path::Path; +use std::sync::Arc; + +/// `DbIntMut` provides thread-safe access to LevelDB API with `&self` setters +/// +/// Interior mutability is available without rust locks because the underlying +/// C++ levelDB API is internally thread-safe. +/// +/// If `&self` setters are not needed, prefer [`DB`] instead. +// +// This also provides an abstraction layer which enables +// us to provide an API that is somewhat backwards compatible +// with rusty-leveldb. For example, our get() and put() +// do not require ReadOptions and WriteOptions param. +// +// Do not add any public (mutable) fields to this struct. +#[derive(Debug, Clone)] +pub struct DbIntMut { + // note: these must be private and unchanged after creation. + + // This Option is needed for the Drop impl. See comments there. + // All other methods can call unwrap() because constructors always + // set Some(..) + db: Option>, // Send + Sync. Arc is so we can derive Clone. 
+ path: std::path::PathBuf, + destroy_db_on_drop: bool, + read_options: ReadOptions, + write_options: WriteOptions, +} + +impl DbIntMut { + /// Open a new database + /// + /// If the database is missing, the behaviour depends on `options.create_if_missing`. + /// The database will be created using the settings given in `options`. + #[inline] + pub fn open(name: &Path, options: &Options) -> Result { + let db = Database::open(name, options)?; + Ok(Self { + db: Some(Arc::new(db)), + path: name.into(), + destroy_db_on_drop: false, + read_options: ReadOptions::new(), + write_options: WriteOptions::new(), + }) + } + + /// Open a new database + /// + /// If the database is missing, the behaviour depends on `options.create_if_missing`. + /// The database will be created using the settings given in `options`. + #[inline] + pub fn open_with_options( + name: &Path, + options: &Options, + read_options: ReadOptions, + write_options: WriteOptions, + ) -> Result { + let db = Database::open(name, options)?; + Ok(Self { + db: Some(Arc::new(db)), + path: name.into(), + destroy_db_on_drop: false, + read_options, + write_options, + }) + } + + /// Open a new database with a custom comparator + /// + /// If the database is missing, the behaviour depends on `options.create_if_missing`. + /// The database will be created using the settings given in `options`. + /// + /// The comparator must implement a total ordering over the keyspace. + /// + /// For keys that implement Ord, consider the `OrdComparator`. + #[inline] + pub fn open_with_comparator( + name: &Path, + options: &Options, + comparator: C, + ) -> Result { + let db = Database::open_with_comparator(name, options, comparator)?; + Ok(Self { + db: Some(Arc::new(db)), + path: name.into(), + destroy_db_on_drop: false, + read_options: ReadOptions::new(), + write_options: WriteOptions::new(), + }) + } + + /// Creates and opens a test database + /// + /// The database will be created in the system + /// temp directory with prefix "test-db-" followed + /// by a random string. + /// + /// if destroy_db_on_drop is true, the database on-disk + /// files will be wiped when the DB struct is dropped. + pub fn open_new_test_database( + destroy_db_on_drop: bool, + options: Option, + read_options: Option, + write_options: Option, + ) -> Result { + let path = std::env::temp_dir().join(format!( + "test-db-{}", + Alphanumeric.sample_string(&mut rand::thread_rng(), 10) + )); + Self::open_test_database( + &path, + destroy_db_on_drop, + options, + read_options, + write_options, + ) + } + + /// Opens an existing (test?) database, with auto-destroy option. + /// + /// if destroy_db_on_drop is true, the database on-disk + /// files will be wiped when the DB struct is dropped. + /// This is usually useful only for unit-test purposes. 
+    pub fn open_test_database(
+        path: &std::path::Path,
+        destroy_db_on_drop: bool,
+        options: Option<Options>,
+        read_options: Option<ReadOptions>,
+        write_options: Option<WriteOptions>,
+    ) -> Result<Self, DbError> {
+        let mut opt = options.unwrap_or_else(Options::new);
+        let read_opt = read_options.unwrap_or_else(ReadOptions::new);
+        let write_opt = write_options.unwrap_or_else(WriteOptions::new);
+
+        opt.create_if_missing = true;
+        opt.error_if_exists = false;
+
+        let mut db = Self::open_with_options(path, &opt, read_opt, write_opt)?;
+        db.destroy_db_on_drop = destroy_db_on_drop;
+        Ok(db)
+    }
+
+    /// Set a key/val in the database
+    #[inline]
+    pub fn put(&self, key: &dyn IntoLevelDBKey, value: &[u8]) -> Result<(), DbError> {
+        self.db
+            .as_ref()
+            .unwrap()
+            .put(&self.write_options, key, value)
+    }
+
+    /// Set a key/val in the database, with key as bytes.
+    #[inline]
+    pub fn put_u8(&self, key: &[u8], value: &[u8]) -> Result<(), DbError> {
+        self.db
+            .as_ref()
+            .unwrap()
+            .put_u8(&self.write_options, key, value)
+    }
+
+    /// Get a value matching key from the database
+    #[inline]
+    pub fn get(&self, key: &dyn IntoLevelDBKey) -> Result<Option<Vec<u8>>, DbError> {
+        self.db.as_ref().unwrap().get(&self.read_options, key)
+    }
+
+    /// Get a value matching key from the database, with key as bytes
+    #[inline]
+    pub fn get_u8(&self, key: &[u8]) -> Result<Option<Vec<u8>>, DbError> {
+        self.db.as_ref().unwrap().get_u8(&self.read_options, key)
+    }
+
+    /// Delete an entry matching key from the database
+    #[inline]
+    pub fn delete(&self, key: &dyn IntoLevelDBKey) -> Result<(), DbError> {
+        self.db.as_ref().unwrap().delete(&self.write_options, key)
+    }
+
+    /// Delete an entry matching key from the database, with key as bytes
+    #[inline]
+    pub fn delete_u8(&self, key: &[u8]) -> Result<(), DbError> {
+        self.db
+            .as_ref()
+            .unwrap()
+            .delete_u8(&self.write_options, key)
+    }
+
+    /// Write the WriteBatch to the database atomically
+    ///
+    /// The `sync` flag forces a filesystem sync operation (e.g. fsync),
+    /// which is slower than an async write; async writes are not
+    /// guaranteed to complete. See the LevelDB docs.
+    pub fn write(&self, batch: &WriteBatch, sync: bool) -> Result<(), DbError> {
+        const WO_SYNC: WriteOptions = WriteOptions { sync: true };
+        const WO_NOSYNC: WriteOptions = WriteOptions { sync: false };
+
+        self.db
+            .as_ref()
+            .unwrap()
+            .write(if sync { &WO_SYNC } else { &WO_NOSYNC }, batch)
+    }
+
+    /// Write [`WriteBatch`] to the database atomically
+    ///
+    /// Sync behavior is determined by the WriteOptions
+    /// supplied at `DB` creation.
+    pub fn write_auto(&self, batch: &WriteBatch) -> Result<(), DbError> {
+        self.db.as_ref().unwrap().write(&self.write_options, batch)
+    }
+
+    /// Returns the directory path of the database files on disk.
+    #[inline]
+    pub fn path(&self) -> &std::path::PathBuf {
+        &self.path
+    }
+
+    /// Returns the `destroy_db_on_drop` setting
+    #[inline]
+    pub fn destroy_db_on_drop(&self) -> bool {
+        self.destroy_db_on_drop
+    }
+
+    /// Wipe the database files, if existing.
+    fn destroy_db(&self) -> Result<(), std::io::Error> {
+        match self.path.exists() {
+            true => std::fs::remove_dir_all(&self.path),
+            false => Ok(()),
+        }
+    }
+}
+
+impl Drop for DbIntMut {
+    #[inline]
+    fn drop(&mut self) {
+        if self.destroy_db_on_drop {
+            {
+                // note: this block is only needed on windows, though it works
+                // on other platforms. Windows won't allow deletion of the
+                // underlying DB file while it remains open. The file doesn't
+                // get closed until the `rs_leveldb::DB` is dropped, which calls
+                // the C API leveldb_close().
+                //
+                // So we must drop the `DB` held by `self`, but to do that we must
+                // obtain ownership of the `Arc<DB>`.
+                //
+                // `Self::db` is an `Option` because `Option::take()` allows extracting
+                // an owned value with an `&mut` reference. Whereas `Cell` and `RefCell`
+                // require a `self` (for `into_inner()`), but `impl Drop` provides us
+                // only an `&mut self` reference.
+                //
+                // `mem::replace()` is another way to do it, but then we must replace
+                // the `DB` with another `DB` which also opens a file, so we would
+                // just create the problem again.
+
+                // get the `Arc<DB>` out of the `Option`, and replace it with `None`.
+                let db_opt = self.db.take();
+
+                // now we own the `Arc`, so we can drop it, and the `DB` with it.
+                if let Some(db_arc) = db_opt {
+                    drop(db_arc);
+                }
+            }
+
+            // note: we do not panic if the database directory
+            // cannot be removed. Perhaps revisit later.
+            let _ = self.destroy_db();
+        }
+    }
+}
+
+// impl Batch for DbIntMut {
+//     #[inline]
+//     fn write(&self, options: &WriteOptions, batch: &WriteBatch) -> Result<(), DbError> {
+//         self.db.write(options, batch)
+//     }
+// }
+
+impl<'a> Compaction<'a> for DbIntMut {
+    #[inline]
+    fn compact(&self, start: &'a [u8], limit: &'a [u8]) {
+        self.db.as_ref().unwrap().compact(start, limit)
+    }
+}
+
+impl<'a> Iterable<'a> for DbIntMut {
+    #[inline]
+    fn iter(&'a self, options: &ReadOptions) -> Iterator<'a> {
+        self.db.as_ref().unwrap().iter(options)
+    }
+
+    #[inline]
+    fn keys_iter(&'a self, options: &ReadOptions) -> KeyIterator<'a> {
+        self.db.as_ref().unwrap().keys_iter(options)
+    }
+
+    #[inline]
+    fn value_iter(&'a self, options: &ReadOptions) -> ValueIterator<'a> {
+        self.db.as_ref().unwrap().value_iter(options)
+    }
+}
+
+impl Snapshots for DbIntMut {
+    fn snapshot(&self) -> Snapshot {
+        self.db.as_ref().unwrap().snapshot()
+    }
+}
+
+/// `DB` provides thread-safe access to the LevelDB API with `&mut self` setters.
+///
+/// `DB` is a newtype wrapper for [`DbIntMut`] that hides the interior mutability
+/// of the underlying C++ LevelDB API, which is internally thread-safe.
+///
+/// If interior mutability is needed, use [`DbIntMut`] instead.
+//
+// This also provides an abstraction layer which enables
+// us to provide an API that is somewhat backwards compatible
+// with rusty-leveldb. For example, our get() and put()
+// do not require a ReadOptions or WriteOptions param.
+#[derive(Debug, Clone)]
+pub struct DB(DbIntMut);
+
+impl DB {
+    /// Open a new database
+    ///
+    /// If the database is missing, the behaviour depends on `options.create_if_missing`.
+    /// The database will be created using the settings given in `options`.
+    #[inline]
+    pub fn open(name: &Path, options: &Options) -> Result<Self, DbError> {
+        Ok(Self(DbIntMut::open(name, options)?))
+    }
+
+    /// Open a new database
+    ///
+    /// If the database is missing, the behaviour depends on `options.create_if_missing`.
+    /// The database will be created using the settings given in `options`.
+    #[inline]
+    pub fn open_with_options(
+        name: &Path,
+        options: &Options,
+        read_options: ReadOptions,
+        write_options: WriteOptions,
+    ) -> Result<Self, DbError> {
+        Ok(Self(DbIntMut::open_with_options(
+            name,
+            options,
+            read_options,
+            write_options,
+        )?))
+    }
+
+    /// Open a new database with a custom comparator
+    ///
+    /// If the database is missing, the behaviour depends on `options.create_if_missing`.
+    /// The database will be created using the settings given in `options`.
+    ///
+    /// The comparator must implement a total ordering over the keyspace.
+    ///
+    /// For keys that implement Ord, consider the `OrdComparator`.
+    #[inline]
+    pub fn open_with_comparator<C: Comparator>(
+        name: &Path,
+        options: &Options,
+        comparator: C,
+    ) -> Result<Self, DbError> {
+        Ok(Self(DbIntMut::open_with_comparator(
+            name, options, comparator,
+        )?))
+    }
+
+    /// Creates and opens a test database
+    ///
+    /// The database will be created in the system
+    /// temp directory with prefix "test-db-" followed
+    /// by a random string.
+    ///
+    /// If `destroy_db_on_drop` is true, the database's on-disk
+    /// files will be wiped when the DB struct is dropped.
+    pub fn open_new_test_database(
+        destroy_db_on_drop: bool,
+        options: Option<Options>,
+        read_options: Option<ReadOptions>,
+        write_options: Option<WriteOptions>,
+    ) -> Result<Self, DbError> {
+        Ok(Self(DbIntMut::open_new_test_database(
+            destroy_db_on_drop,
+            options,
+            read_options,
+            write_options,
+        )?))
+    }
+
+    /// Opens an existing (test) database, with an auto-destroy option.
+    ///
+    /// If `destroy_db_on_drop` is true, the database's on-disk
+    /// files will be wiped when the DB struct is dropped.
+    /// This is usually useful only for unit-test purposes.
+    pub fn open_test_database(
+        path: &std::path::Path,
+        destroy_db_on_drop: bool,
+        options: Option<Options>,
+        read_options: Option<ReadOptions>,
+        write_options: Option<WriteOptions>,
+    ) -> Result<Self, DbError> {
+        Ok(Self(DbIntMut::open_test_database(
+            path,
+            destroy_db_on_drop,
+            options,
+            read_options,
+            write_options,
+        )?))
+    }
+
+    /// Set a key/val in the database
+    #[inline]
+    pub fn put(&mut self, key: &dyn IntoLevelDBKey, value: &[u8]) -> Result<(), DbError> {
+        self.0.put(key, value)
+    }
+
+    /// Set a key/val in the database, with key as bytes.
+    #[inline]
+    pub fn put_u8(&mut self, key: &[u8], value: &[u8]) -> Result<(), DbError> {
+        self.0.put_u8(key, value)
+    }
+
+    /// Get a value matching key from the database
+    #[inline]
+    pub fn get(&self, key: &dyn IntoLevelDBKey) -> Result<Option<Vec<u8>>, DbError> {
+        self.0.get(key)
+    }
+
+    /// Get a value matching key from the database, with key as bytes
+    #[inline]
+    pub fn get_u8(&self, key: &[u8]) -> Result<Option<Vec<u8>>, DbError> {
+        self.0.get_u8(key)
+    }
+
+    /// Delete an entry matching key from the database
+    #[inline]
+    pub fn delete(&mut self, key: &dyn IntoLevelDBKey) -> Result<(), DbError> {
+        self.0.delete(key)
+    }
+
+    /// Delete an entry matching key from the database, with key as bytes
+    #[inline]
+    pub fn delete_u8(&mut self, key: &[u8]) -> Result<(), DbError> {
+        self.0.delete_u8(key)
+    }
+
+    /// Write the WriteBatch to the database atomically
+    ///
+    /// The `sync` flag forces a filesystem sync operation (e.g. fsync),
+    /// which is slower than an async write; async writes are not
+    /// guaranteed to complete. See the LevelDB docs.
+    pub fn write(&mut self, batch: &WriteBatch, sync: bool) -> Result<(), DbError> {
+        self.0.write(batch, sync)
+    }
+
+    /// Write [`WriteBatch`] to the database atomically
+    ///
+    /// Sync behavior is determined by the WriteOptions
+    /// supplied at `DB` creation.
+    pub fn write_auto(&mut self, batch: &WriteBatch) -> Result<(), DbError> {
+        self.0.write_auto(batch)
+    }
+
+    /// Returns the directory path of the database files on disk.
+    #[inline]
+    pub fn path(&self) -> &std::path::PathBuf {
+        self.0.path()
+    }
+
+    /// Returns the `destroy_db_on_drop` setting
+    #[inline]
+    pub fn destroy_db_on_drop(&self) -> bool {
+        self.0.destroy_db_on_drop()
+    }
+
+    /// Compacts the database file. Should be called periodically.
+    #[inline]
+    pub fn compact<'a>(&mut self, start: &'a [u8], limit: &'a [u8]) {
+        self.0.compact(start, limit)
+    }
+
+    /// Wipe the database files, if existing.
+    pub fn destroy_db(&mut self) -> Result<(), std::io::Error> {
+        self.0.destroy_db()
+    }
+}
+
+impl<'a> Iterable<'a> for DB {
+    #[inline]
+    fn iter(&'a self, options: &ReadOptions) -> Iterator<'a> {
+        self.0.iter(options)
+    }
+
+    #[inline]
+    fn keys_iter(&'a self, options: &ReadOptions) -> KeyIterator<'a> {
+        self.0.keys_iter(options)
+    }
+
+    #[inline]
+    fn value_iter(&'a self, options: &ReadOptions) -> ValueIterator<'a> {
+        self.0.value_iter(options)
+    }
+}
+
+impl Snapshots for DB {
+    fn snapshot(&self) -> Snapshot {
+        self.0.snapshot()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    use super::*;
+
+    #[test]
+    fn level_db_close_and_reload() {
+        // open new test database that will not be destroyed on close.
+        let mut db = DB::open_new_test_database(false, None, None, None).unwrap();
+        let db_path = db.path().clone();
+
+        let key = "answer-to-everything";
+        let val = vec![42];
+
+        let _ = db.put(&key, &val);
+
+        drop(db); // close the DB.
+
+        assert!(db_path.exists());
+
+        // open existing database that will be destroyed on close.
+        let db2 = DbIntMut::open_test_database(&db_path, true, None, None, None).unwrap();
+
+        let val2 = db2.get(&key).unwrap().unwrap();
+        assert_eq!(val, val2);
+
+        drop(db2); // close the DB. db_path dir is auto removed.
+
+        assert!(!db_path.exists());
+    }
+}
diff --git a/src/database/storage/mod.rs b/src/database/storage/mod.rs
new file mode 100644
index 00000000..14c0c20a
--- /dev/null
+++ b/src/database/storage/mod.rs
@@ -0,0 +1,33 @@
+#![warn(missing_docs)]
+#![warn(rustdoc::unescaped_backticks)]
+#![warn(rustdoc::broken_intra_doc_links)]
+
+//! Collection types backed by LevelDB.
+//!
+//! In particular:
+//! - [`OrdinaryVec`](storage_vec::OrdinaryVec) provides a standard (in-memory) Vec
+//!   that implements the StorageVec trait. It is mainly useful for tests and doctests.
+//! - [`SimpleRustyStorage`](storage_schema::SimpleRustyStorage) provides atomic NeptuneLevelDb writes across
+//!   any number of [`DbtVec`](storage_schema::DbtVec) or [`DbtSingleton`](storage_schema::DbtSingleton) "tables".
+//! - [`NeptuneLevelDb`](crate::database::NeptuneLevelDb) provides a convenient wrapper for the LevelDB API.
+
+// For anyone reading this code and trying to understand the StorageVec trait and the DbSchema,
+// the notes below may help speed understanding.
+//
+// 0. DbSchema::pending_writes holds an AtomicRw (Arc<RwLock<PendingWrites>>).
+//    PendingWrites is a list of pending DB operations that are waiting to be
+//    persisted to the database.
+// 1. Each logical table (DbtVec or DbtSingleton) created by a given DbSchema holds
+//    an Arc clone of the PendingWrites. Thus the list is shared between tables
+//    and DbSchema has a view of all pending writes, across all tables.
+// 2. SimpleStorageReader provides DB access for the tables to read data as needed.
+//    It does not provide any API for them to write, so they can only write by adding
+//    an operation to PendingWrites.
+// 3. SimpleStorageWriter::persist() reads all the PendingWrites in DbSchema and
+//    writes them to the DB, and then clears the list.
+// 4. Table types such as DbtVec keep an internal cache of pending written data that
+//    must at all times match the logical state of the DB, as if it had already been
+//    written to. This cache is cleared when data is actually persisted.
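As a quick orientation for reviewers, here is a minimal sketch of the round trip the notes above describe, using the APIs introduced in this diff (`SimpleRustyStorage::new`, `schema.new_vec`, `persist`). The import paths, the `traits::*` glob for the writer trait, and the "scores" table name/values are assumptions for illustration, not part of the diff:

```rust
// Minimal sketch of the pending-writes flow, under the assumptions noted above.
// Mutations are queued in the shared PendingWrites list (and mirrored in the
// table's cache); nothing hits LevelDB until persist() writes one atomic batch.
use crate::database::storage::storage_schema::{traits::*, SimpleRustyStorage};
use crate::database::NeptuneLevelDb;

async fn pending_writes_flow() {
    let db = NeptuneLevelDb::open_new_test_database(true, None, None, None)
        .await
        .unwrap();
    let mut storage = SimpleRustyStorage::new(db);

    // a logical "table" backed by key/val pairs in the single LevelDB database
    let mut scores = storage.schema.new_vec::<u64>("scores").await;

    scores.push(41).await; // queued in PendingWrites, not yet on disk
    scores.set(0, 42).await; // also queued; reads are served from the table cache

    storage.persist().await; // all queued ops written in one atomic batch
}
```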
+
+pub mod storage_schema;
+pub mod storage_vec;
diff --git a/src/database/storage/storage_schema/dbtsingleton.rs b/src/database/storage/storage_schema/dbtsingleton.rs
new file mode 100644
index 00000000..89b65574
--- /dev/null
+++ b/src/database/storage/storage_schema/dbtsingleton.rs
@@ -0,0 +1,62 @@
+use std::{fmt::Debug, sync::Arc};
+
+use super::{
+    dbtsingleton_private::DbtSingletonPrivate, traits::*, PendingWrites, SimpleRustyReader,
+};
+use crate::locks::tokio::AtomicRw;
+use serde::{de::DeserializeOwned, Serialize};
+
+/// Singleton type created by [`super::DbtSchema`]
+///
+/// Data stored in a Singleton gets persisted to a
+/// levelDb database.
+#[derive(Debug)]
+pub struct DbtSingleton<V> {
+    // todo: unify inner. no longer necessary.
+    inner: DbtSingletonPrivate<V>,
+}
+
+impl<V> DbtSingleton<V>
+where
+    V: Default + Clone + Serialize,
+{
+    // DbtSingleton can not be instantiated directly outside of storage_schema module
+    // use [Schema::new_singleton()]
+    #[inline]
+    pub(super) fn new(
+        key: u8,
+        write_ops: AtomicRw<PendingWrites>,
+        reader: Arc<SimpleRustyReader>,
+        name: String,
+    ) -> Self {
+        let singleton = DbtSingletonPrivate::<V>::new(key, write_ops, reader, name);
+        Self { inner: singleton }
+    }
+
+    /// returns singleton value
+    #[inline]
+    pub async fn get(&self) -> V {
+        self.inner.get()
+    }
+
+    /// set singleton value
+    #[inline]
+    pub async fn set(&mut self, t: V) {
+        self.inner.set(t).await;
+    }
+}
+
+#[async_trait::async_trait]
+impl<V> DbTable for DbtSingleton<V>
+where
+    V: Clone + Default,
+    V: Serialize + DeserializeOwned + Send + Sync,
+{
+    #[inline]
+    async fn restore_or_new(&mut self) {
+        self.inner.current_value = match self.inner.reader.get(self.inner.key.into()).await {
+            Some(value) => value.into_any(),
+            None => V::default(),
+        };
+    }
+}
diff --git a/src/database/storage/storage_schema/dbtsingleton_private.rs b/src/database/storage/storage_schema/dbtsingleton_private.rs
new file mode 100644
index 00000000..2b55c920
--- /dev/null
+++ b/src/database/storage/storage_schema/dbtsingleton_private.rs
@@ -0,0 +1,65 @@
+use super::{PendingWrites, RustyValue, SimpleRustyReader, WriteOperation};
+use crate::locks::tokio::AtomicRw;
+use serde::Serialize;
+use std::fmt::{Debug, Formatter};
+use std::sync::Arc;
+
+// note: no locking is required in `DbtSingletonPrivate` because locking
+// is performed in the `DbtSingleton` public wrapper.
+pub(super) struct DbtSingletonPrivate<V> {
+    pub(super) pending_writes: AtomicRw<PendingWrites>,
+    pub(super) key: u8,
+    pub(super) current_value: V,
+    pub(super) old_value: V,
+    pub(super) reader: Arc<SimpleRustyReader>,
+    pub(super) name: String,
+}
+
+impl<V> Debug for DbtSingletonPrivate<V>
+where
+    V: Debug,
+{
+    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
+        f.debug_struct("DbtSingletonPrivate")
+            .field("key", &self.key)
+            .field("current_value", &self.current_value)
+            .field("old_value", &self.old_value)
+            .field("reader", &"Arc<SimpleRustyReader>")
+            .field("name", &self.name)
+            .finish()
+    }
+}
+
+impl<V: Default + Clone + Serialize> DbtSingletonPrivate<V> {
+    pub(super) fn new(
+        key: u8,
+        pending_writes: AtomicRw<PendingWrites>,
+        reader: Arc<SimpleRustyReader>,
+        name: String,
+    ) -> Self {
+        Self {
+            key,
+            current_value: Default::default(),
+            old_value: Default::default(),
+            pending_writes,
+            reader,
+            name: name.to_owned(),
+        }
+    }
+    pub(super) fn get(&self) -> V {
+        self.current_value.clone()
+    }
+
+    pub(super) async fn set(&mut self, v: V) {
+        self.pending_writes
+            .lock_guard_mut()
+            .await
+            .write_ops
+            .push(WriteOperation::Write(
+                self.key.into(),
+                RustyValue::from_any(&v),
+            ));
+
+        self.current_value = v;
+    }
+}
diff --git a/src/database/storage/storage_schema/dbtvec.rs b/src/database/storage/storage_schema/dbtvec.rs
new file mode 100644
index 00000000..23589935
--- /dev/null
+++ b/src/database/storage/storage_schema/dbtvec.rs
@@ -0,0 +1,173 @@
+use super::super::storage_vec::{traits::*, Index};
+use super::dbtvec_private::DbtVecPrivate;
+use super::{traits::*, PendingWrites, SimpleRustyReader};
+use crate::locks::tokio::AtomicRw;
+use serde::{de::DeserializeOwned, Serialize};
+use std::{fmt::Debug, sync::Arc};
+
+use futures::Stream;
+
+/// A LevelDb-backed Vec for use with DbSchema
+///
+/// Data stored in a DbtVec gets persisted to a levelDb database.
+#[derive(Debug)]
+pub struct DbtVec<V> {
+    // todo: merge DbtVecPrivate into DbtVec
+    inner: DbtVecPrivate<V>,
+}
+
+impl<V> DbtVec<V>
+where
+    V: Clone + Serialize,
+{
+    // DbtVec cannot be instantiated directly outside of storage_schema module
+    // use [Schema::new_vec()]
+    #[inline]
+    pub(super) async fn new(
+        pending_writes: AtomicRw<PendingWrites>,
+        reader: Arc<SimpleRustyReader>,
+        key_prefix: u8,
+        name: &str,
+    ) -> Self {
+        let vec = DbtVecPrivate::<V>::new(pending_writes, reader, key_prefix, name).await;
+
+        Self { inner: vec }
+    }
+}
+
+#[async_trait::async_trait]
+impl<V> StorageVecBase<V> for DbtVec<V>
+// impl DbtVec
+where
+    V: Clone + Debug,
+    V: Serialize + DeserializeOwned + Send + Sync,
+{
+    #[inline]
+    async fn is_empty(&self) -> bool {
+        self.inner.is_empty().await
+    }
+
+    #[inline]
+    async fn len(&self) -> Index {
+        self.inner.len().await
+    }
+
+    #[inline]
+    async fn get(&self, index: Index) -> V {
+        self.inner.get(index).await
+    }
+
+    #[inline]
+    async fn get_many(&self, indices: &[Index]) -> Vec<V> {
+        self.inner.get_many(indices).await
+    }
+
+    #[inline]
+    async fn get_all(&self) -> Vec<V> {
+        self.inner.get_all().await
+    }
+
+    #[inline]
+    async fn set(&mut self, index: Index, value: V) {
+        self.inner.set(index, value).await;
+    }
+
+    #[inline]
+    async fn set_many(&mut self, key_vals: impl IntoIterator<Item = (Index, V)> + Send) {
+        self.inner
+            .set_many(key_vals.into_iter().collect::<Vec<_>>())
+            .await;
+    }
+
+    #[inline]
+    async fn pop(&mut self) -> Option<V> {
+        self.inner.pop().await
+    }
+
+    #[inline]
+    async fn push(&mut self, value: V) {
+        self.inner.push(value).await;
+    }
+
+    #[inline]
+    async fn clear(&mut self) {
+        self.inner.clear().await;
+    }
+}
+
+#[async_trait::async_trait]
+impl<V> DbTable for DbtVec<V>
+where
+    V: Clone,
+    V: Serialize + DeserializeOwned + Send + Sync,
+{
+    #[inline]
+    async fn restore_or_new(&mut self) {
+        if let Some(length) = self
+            .inner
+            .reader
+            .get(DbtVecPrivate::<V>::get_length_key(self.inner.key_prefix))
+            .await
+        {
+            self.inner.current_length = Some(length.into_any());
+        } else {
+            self.inner.current_length = Some(0);
+        }
+    }
+}
+
+/// Async Streams (ie async iterators)
+impl<T: Clone + Debug + Serialize + DeserializeOwned + Send + Sync> StorageVecStream<T>
+    for DbtVec<T>
+{
+    async fn stream<'a>(&'a self) -> impl Stream<Item = (Index, T)> + 'a
+    where
+        T: 'a,
+    {
+        self.stream_many(0..self.len().await).await
+    }
+
+    async fn stream_values<'a>(&'a self) -> impl Stream<Item = T> + 'a
+    where
+        T: 'a,
+    {
+        self.stream_many_values(0..self.len().await).await
+    }
+}
+
+impl<T: Clone + Debug + Serialize + DeserializeOwned + Send + Sync> StorageVec<T> for DbtVec<T> {}
+
+#[cfg(test)]
+mod tests {
+
+    use super::super::SimpleRustyStorage;
+    use super::*;
+    use crate::database::NeptuneLevelDb;
+
+    pub async fn mk_test_vec_u64() -> DbtVec<u64> {
+        // open new DB that will be closed on drop.
+        let db = NeptuneLevelDb::open_new_test_database(true, None, None, None)
+            .await
+            .unwrap();
+        let mut rusty_storage = SimpleRustyStorage::new(db);
+        rusty_storage.schema.new_vec::<u64>("test-vector").await
+    }
+
+    pub mod streams {
+        use super::super::super::super::storage_vec::traits::tests::streams as stream_tests;
+        use super::*;
+
+        #[tokio::test]
+        pub async fn stream() {
+            stream_tests::stream(mk_test_vec_u64().await).await
+        }
+
+        #[tokio::test]
+        pub async fn stream_many() {
+            stream_tests::stream_many(mk_test_vec_u64().await).await
+        }
+    }
+}
diff --git a/src/database/storage/storage_schema/dbtvec_private.rs b/src/database/storage/storage_schema/dbtvec_private.rs
new file mode 100644
index 00000000..769bd95a
--- /dev/null
+++ b/src/database/storage/storage_schema/dbtvec_private.rs
@@ -0,0 +1,389 @@
+use super::super::storage_vec::Index;
+use super::RustyKey;
+use super::{traits::StorageReader, PendingWrites, RustyValue, SimpleRustyReader, WriteOperation};
+use crate::locks::tokio::AtomicRw;
+use itertools::Itertools;
+use serde::de::DeserializeOwned;
+use serde::Serialize;
+use std::fmt::{Debug, Formatter};
+use std::{collections::HashMap, sync::Arc};
+
+pub(super) struct DbtVecPrivate<V> {
+    pub(super) pending_writes: AtomicRw<PendingWrites>,
+    pub(super) reader: Arc<SimpleRustyReader>,
+    pub(super) current_length: Option<Index>,
+    pub(super) key_prefix: u8,
+    pub(super) cache: HashMap<Index, V>,
+    persist_count: usize,
+    pub(super) name: String,
+    phantom: std::marker::PhantomData<V>,
+}
+
+impl<V> Debug for DbtVecPrivate<V>
+where
+    V: Debug,
+{
+    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
+        f.debug_struct("DbtVecPrivate")
+            .field("reader", &"Arc<SimpleRustyReader>")
+            .field("current_length", &self.current_length)
+            .field("key_prefix", &self.key_prefix)
+            .field("cache", &self.cache)
+            .field("name", &self.name)
+            .finish()
+    }
+}
+
+impl<V: Clone + Serialize + DeserializeOwned> DbtVecPrivate<V> {
+    #[inline]
+    pub(super) async fn get(&self, index: Index) -> V {
+        // Disallow getting values out-of-bounds
+
+        assert!(
+            index < self.len().await,
+            "Out-of-bounds. Got {index} but length was {}. persisted vector name: {}",
+            self.len().await,
+            self.name
+        );
+
+        // try cache first
+        if self.cache.contains_key(&index) {
+            return self
+                .cache
+                .get(&index)
+                .expect("there should be some value")
+                .clone();
+        }
+
+        // then try persistent storage
+        let key: RustyKey = self.get_index_key(index);
+        let val = self.reader.get(key).await.unwrap_or_else(|| {
+            panic!(
+                "Element with index {index} does not exist in {}. This should not happen",
+                self.name
+            )
+        });
+        val.into_any()
+    }
+
+    #[inline]
+    pub(super) async fn set(&mut self, index: Index, value: V) {
+        // Disallow setting values out-of-bounds
+
+        assert!(
+            index < self.len().await,
+            "Out-of-bounds. Got {index} but length was {}. persisted vector name: {}",
+            self.len().await,
+            self.name
+        );
+
+        self.write_op_overwrite(index, value).await;
+    }
+}
+
+impl<V> DbtVecPrivate<V>
+where
+    V: Clone + Serialize,
+{
+    // Return the key used to store the length of the vector
+    #[inline]
+    pub(super) fn get_length_key(key_prefix: u8) -> RustyKey {
+        let const_length_key: RustyKey = 0u8.into();
+        let key_prefix_key: RustyKey = key_prefix.into();
+
+        // This concatenates prefix + length (0u8) to form the
+        // real Key as used in LevelDB
+        (key_prefix_key, const_length_key).into()
+    }
+
+    /// Return the length at the last write to disk
+    #[inline]
+    pub(super) async fn persisted_length(&self) -> Option<Index> {
+        self.reader
+            .get(Self::get_length_key(self.key_prefix))
+            .await
+            .map(|v| v.into_any())
+    }
+
+    /// Return the key of K type used to store the element at a given index of Index type
+    #[inline]
+    pub(super) fn get_index_key(&self, index: Index) -> RustyKey {
+        let key_prefix_key: RustyKey = self.key_prefix.into();
+        let index_key: RustyKey = index.into();
+
+        // This concatenates prefix + index to form the
+        // real Key as used in LevelDB
+        (key_prefix_key, index_key).into()
+    }
+
+    #[inline]
+    pub(crate) async fn new(
+        pending_writes: AtomicRw<PendingWrites>,
+        reader: Arc<SimpleRustyReader>,
+        key_prefix: u8,
+        name: &str,
+    ) -> Self {
+        let length = None;
+        let cache = HashMap::new();
+        let persist_count = pending_writes.lock_guard().await.persist_count;
+
+        Self {
+            pending_writes,
+            key_prefix,
+            reader,
+            current_length: length,
+            cache,
+            persist_count,
+            name: name.to_string(),
+            phantom: Default::default(),
+        }
+    }
+
+    #[inline]
+    async fn write_op_overwrite(&mut self, index: Index, value: V) {
+        let persist_count = {
+            let mut pending_writes = self.pending_writes.lock_guard_mut().await;
+
+            pending_writes.write_ops.push(WriteOperation::Write(
+                self.get_index_key(index),
+                RustyValue::from_any(&value),
+            ));
+            pending_writes.persist_count
+        };
+        self.process_persist_count(persist_count);
+
+        self.cache.insert(index, value.clone());
+    }
+
+    fn process_persist_count(&mut self, pending_writes_persist_count: usize) {
+        if pending_writes_persist_count > self.persist_count {
+            self.cache.clear();
+        }
+        self.persist_count = pending_writes_persist_count;
+    }
+}
+
+impl<V> DbtVecPrivate<V>
+where
+    V: Clone + Serialize + DeserializeOwned,
+{
+    #[inline]
+    pub(super) async fn is_empty(&self) -> bool {
+        self.len().await == 0
+    }
+
+    #[inline]
+    pub(super) async fn len(&self) -> Index {
+        match self.current_length {
+            Some(l) => l,
+            None => self.persisted_length().await.unwrap_or(0),
+        }
+    }
+
+    /// Fetch multiple elements from a `DbtVec` and return the elements matching the order
+    /// of the input indices.
+    pub(super) async fn get_many(&self, indices: &[Index]) -> Vec<V> {
+        fn sort_to_match_requested_index_order<V>(
+            indexed_elements: HashMap<usize, V>,
+        ) -> Vec<V> {
+            let mut elements = indexed_elements.into_iter().collect_vec();
+            elements.sort_unstable_by_key(|&(index_position, _)| index_position);
+            elements.into_iter().map(|(_, element)| element).collect()
+        }
+
+        let max_index = match indices.iter().max() {
+            Some(i) => i,
+            None => return vec![],
+        };
+
+        assert!(
+            *max_index < self.len().await,
+            "Out-of-bounds. Got index {max_index} but length was {}. persisted vector name: {}",
+            self.len().await,
+            self.name
+        );
+
+        let (indices_of_elements_in_cache, indices_of_elements_not_in_cache): (Vec<_>, Vec<_>) =
+            indices
+                .iter()
+                .copied()
+                .enumerate()
+                .partition(|&(_, index)| self.cache.contains_key(&index));
+
+        let mut fetched_elements = HashMap::with_capacity(indices.len());
+        for (index_position, index) in indices_of_elements_in_cache {
+            let value = self
+                .cache
+                .get(&index)
+                .expect("there should be some value")
+                .clone();
+            fetched_elements.insert(index_position, value);
+        }
+
+        let no_need_to_lock_database = indices_of_elements_not_in_cache.is_empty();
+        if no_need_to_lock_database {
+            return sort_to_match_requested_index_order(fetched_elements);
+        }
+
+        let keys_for_indices_not_in_cache = indices_of_elements_not_in_cache
+            .iter()
+            .map(|&(_, index)| self.get_index_key(index))
+            .collect_vec();
+        let elements_fetched_from_db = self
+            .reader
+            .get_many(keys_for_indices_not_in_cache)
+            .await
+            .into_iter()
+            .map(|x| x.expect("there should be some value").into_any());
+
+        let indexed_fetched_elements_from_db = indices_of_elements_not_in_cache
+            .iter()
+            .map(|&(index_position, _)| index_position)
+            .zip_eq(elements_fetched_from_db);
+        fetched_elements.extend(indexed_fetched_elements_from_db);
+
+        sort_to_match_requested_index_order(fetched_elements)
+    }
+
+    /// Return all stored elements in a vector, whose index matches the StorageVec's.
+    /// It's the caller's responsibility that there is enough memory to store all elements.
+    pub(super) async fn get_all(&self) -> Vec<V> {
+        // let fake_cache: HashMap<Index, V> = HashMap::new();
+
+        let (indices_of_elements_in_cache, indices_of_elements_not_in_cache): (Vec<_>, Vec<_>) =
+            (0..self.len().await).partition(|index| self.cache.contains_key(index));
+
+        let mut fetched_elements: Vec<Option<V>> = vec![None; self.len().await as usize];
+        for index in indices_of_elements_in_cache {
+            let element = self.cache[&index].clone();
+            fetched_elements[index as usize] = Some(element);
+        }
+
+        let no_need_to_lock_database = indices_of_elements_not_in_cache.is_empty();
+        if no_need_to_lock_database {
+            return fetched_elements
+                .into_iter()
+                .map(|x| x.expect("there should be some value"))
+                .collect_vec();
+        }
+
+        let keys = indices_of_elements_not_in_cache
+            .iter()
+            .map(|x| self.get_index_key(*x))
+            .collect_vec();
+        let elements_fetched_from_db = self
+            .reader
+            .get_many(keys)
+            .await
+            .into_iter()
+            .map(|x| x.expect("there should be some value").into_any());
+        let indexed_fetched_elements_from_db = indices_of_elements_not_in_cache
+            .into_iter()
+            .zip_eq(elements_fetched_from_db);
+        for (index, element) in indexed_fetched_elements_from_db {
+            fetched_elements[index as usize] = Some(element);
+        }
+
+        fetched_elements
+            .into_iter()
+            .map(|x| x.expect("there should be some value"))
+            .collect_vec()
+    }
+
+    /// Set multiple elements.
+    ///
+    /// Panics if key_vals contains an index not in the collection.
+    ///
+    /// It is the caller's responsibility to ensure that index values are
+    /// unique. If not, the last value with the same index will win.
+    /// For unordered collections such as HashMap, the behavior is undefined.
+    pub(super) async fn set_many(&mut self, key_vals: impl IntoIterator<Item = (Index, V)> + Send) {
+        let self_len = self.len().await;
+
+        for (index, value) in key_vals.into_iter() {
+            assert!(
+                index < self_len,
+                "Out-of-bounds. Got {index} but length was {}. persisted vector name: {}",
+                self_len,
+                self.name
+            );
+
+            self.write_op_overwrite(index, value).await;
+        }
+    }
+
+    #[inline]
+    pub(super) async fn pop(&mut self) -> Option<V> {
+        // If vector is empty, return None
+        if self.is_empty().await {
+            return None;
+        }
+
+        // Update length
+        let current_length = self
+            .current_length
+            .as_mut()
+            .expect("there should be some value");
+
+        *current_length -= 1;
+
+        let new_length = *current_length;
+
+        let persist_count = {
+            let mut pending_writes = self.pending_writes.lock_guard_mut().await;
+
+            pending_writes
+                .write_ops
+                .push(WriteOperation::Delete(self.get_index_key(new_length)));
+            pending_writes.write_ops.push(WriteOperation::Write(
+                Self::get_length_key(self.key_prefix),
+                RustyValue::from_any(&new_length),
+            ));
+            pending_writes.persist_count
+        };
+
+        self.process_persist_count(persist_count);
+
+        // try cache first
+        // let current_length = self.len().await;
+        if self.cache.contains_key(&new_length) {
+            self.cache.remove(&new_length)
+        } else {
+            // then try persistent storage
+            let key = self.get_index_key(new_length);
+            self.reader.get(key).await.map(|value| value.into_any())
+        }
+    }
+
+    #[inline]
+    pub(super) async fn push(&mut self, value: V) {
+        // record in cache
+        let current_length = self.len().await;
+        let new_length = current_length + 1;
+
+        let persist_count = {
+            let mut pending_writes = self.pending_writes.lock_guard_mut().await;
+
+            pending_writes.write_ops.push(WriteOperation::Write(
+                self.get_index_key(current_length),
+                RustyValue::from_any(&value),
+            ));
+            pending_writes.write_ops.push(WriteOperation::Write(
+                Self::get_length_key(self.key_prefix),
+                RustyValue::from_any(&new_length),
+            ));
+            pending_writes.persist_count
+        };
+        self.process_persist_count(persist_count);
+
+        let _old_val = self.cache.insert(current_length, value.clone());
+
+        // update length
+        self.current_length = Some(new_length);
+    }
+
+    #[inline]
+    pub(super) async fn clear(&mut self) {
+        while !self.is_empty().await {
+            self.pop().await;
+        }
+    }
+}
diff --git a/src/database/storage/storage_schema/enums.rs b/src/database/storage/storage_schema/enums.rs
new file mode 100644
index 00000000..1364ab6a
--- /dev/null
+++ b/src/database/storage/storage_schema/enums.rs
@@ -0,0 +1,22 @@
+use super::super::storage_vec::Index;
+use super::{RustyKey, RustyValue};
+
+/// Database write operations
+#[derive(Debug, Clone)]
+pub enum WriteOperation {
+    /// write operation
+    Write(RustyKey, RustyValue),
+    /// delete operation
+    Delete(RustyKey),
+}
+
+/// Vector write operations
+#[derive(Debug, Clone)]
+pub enum VecWriteOperation<T> {
+    /// overwrite, aka set operation
+    OverWrite((Index, T)),
+    /// push to end operation
+    Push(T),
+    /// pop from end operation
+    Pop,
+}
diff --git a/src/database/storage/storage_schema/mod.rs b/src/database/storage/storage_schema/mod.rs
new file mode 100644
index 00000000..66b4b81b
--- /dev/null
+++ b/src/database/storage/storage_schema/mod.rs
@@ -0,0 +1,1055 @@
+//! LevelDB provides atomic writes to a database. However each database is a
+//! simple key/value store. There is no logical sub-unit of a database that we
+//! might call a "Table" or `struct`.
+//!
+//! This makes it difficult for rust code to have multiple `struct`s stored in a
+//! single DB with atomic updates.
+//!
+//! This module provides a virtual DB Schema with logical "tables" that are
+//! backed by key/val pairs in a single LevelDB database.
+//!
+//! Atomic writes are supported across multiple "tables".
+//!
+//! The [`DbtSchema`] type can generate any number of [`DbtVec`] and
+//! [`DbtSingleton`] collection types.
+//!
+//! Mutating operations to these "tables" are cached and written to the database
+//! in a single atomic batch operation.
+//!
+//! Important: write operations are not written until
+//! SimpleRustyStorage::persist() is called.
+
+mod dbtsingleton;
+mod dbtsingleton_private;
+mod dbtvec;
+mod dbtvec_private;
+mod enums;
+mod pending_writes;
+mod rusty_key;
+mod rusty_reader;
+mod rusty_value;
+mod schema;
+mod simple_rusty_reader;
+mod simple_rusty_storage;
+pub mod traits;
+
+pub use dbtsingleton::*;
+pub use dbtvec::*;
+pub use enums::*;
+use pending_writes::*;
+pub use rusty_key::*;
+pub use rusty_reader::*;
+pub use rusty_value::*;
+pub use schema::*;
+pub use simple_rusty_reader::*;
+pub use simple_rusty_storage::*;
+
+#[cfg(test)]
+mod tests {
+
+    use super::traits::*;
+    use super::*;
+
+    use std::sync::Arc;
+
+    use super::super::storage_vec::{traits::*, Index};
+    use rand::{random, Rng, RngCore};
+    use serde::{Deserialize, Serialize};
+
+    use crate::database::NeptuneLevelDb;
+    use crate::twenty_first::shared_math::other::random_elements;
+
+    use itertools::Itertools;
+
+    #[derive(Default, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+    struct S(Vec<u8>);
+    impl From<Vec<u8>> for S {
+        fn from(value: Vec<u8>) -> Self {
+            S(value)
+        }
+    }
+    impl From<S> for Vec<u8> {
+        fn from(value: S) -> Self {
+            value.0
+        }
+    }
+    impl From<(S, S)> for S {
+        fn from(value: (S, S)) -> Self {
+            let vector0: Vec<u8> = value.0.into();
+            let vector1: Vec<u8> = value.1.into();
+            S([vector0, vector1].concat())
+        }
+    }
+    impl From<S> for u64 {
+        fn from(value: S) -> Self {
+            u64::from_be_bytes(value.0.try_into().unwrap())
+        }
+    }
+
+    #[tokio::test]
+    async fn test_simple_singleton() {
+        let singleton_value = S([1u8, 3u8, 3u8, 7u8].to_vec());
+
+        // open new NeptuneLevelDb that will not be dropped on close.
+        let db = NeptuneLevelDb::open_new_test_database(false, None, None, None)
+            .await
+            .unwrap();
+        let db_path = db.path().clone();
+
+        let mut rusty_storage = SimpleRustyStorage::new(db);
+        assert_eq!(1, Arc::strong_count(&rusty_storage.schema.reader));
+        let mut singleton = rusty_storage
+            .schema
+            .new_singleton::<S>("singleton".to_owned())
+            .await;
+        assert_eq!(2, Arc::strong_count(&rusty_storage.schema.reader));
+
+        // test
+        assert_eq!(singleton.get().await, S([].to_vec()));
+
+        // set
+        singleton.set(singleton_value.clone()).await;
+
+        // test
+        assert_eq!(singleton.get().await, singleton_value);
+
+        // persist
+        rusty_storage.persist().await;
+
+        // test
+        assert_eq!(singleton.get().await, singleton_value);
+
+        assert_eq!(2, Arc::strong_count(&rusty_storage.schema.reader));
+
+        // This is just so we can count reader references
+        // after rusty_storage is dropped.
+        let reader_ref = rusty_storage.schema.reader.clone();
+        assert_eq!(3, Arc::strong_count(&reader_ref));
+
+        // drop
+        drop(rusty_storage); // <--- 1 reader ref dropped.
+        assert_eq!(2, Arc::strong_count(&reader_ref));
+
+        drop(singleton); // <--- 1 reader ref dropped
+        assert_eq!(1, Arc::strong_count(&reader_ref));
+
+        drop(reader_ref); // <--- Final reader ref dropped. Db closes.
+
+        // restore. re-open existing NeptuneLevelDb.
+ let new_db = NeptuneLevelDb::open_test_database(&db_path, true, None, None, None) + .await + .unwrap(); + let mut new_rusty_storage = SimpleRustyStorage::new(new_db); + let new_singleton = new_rusty_storage + .schema + .new_singleton::("singleton".to_owned()) + .await; + + // test + assert_eq!(new_singleton.get().await, singleton_value); + } + + #[tokio::test] + async fn test_simple_vector() { + // open new NeptuneLevelDb that will not be dropped on close. + let db = NeptuneLevelDb::open_new_test_database(false, None, None, None) + .await + .unwrap(); + let db_path = db.path().clone(); + + let mut rusty_storage = SimpleRustyStorage::new(db); + let mut vector = rusty_storage.schema.new_vec::("test-vector").await; + + // should work to pass empty array, when vector.is_empty() == true + vector.set_all([]).await; + + // test `get_all` + assert!( + vector.get_all().await.is_empty(), + "`get_all` on unpopulated vector must return empty vector" + ); + + // populate + vector.push(S([1u8].to_vec())).await; + vector.push(S([3u8].to_vec())).await; + vector.push(S([4u8].to_vec())).await; + vector.push(S([7u8].to_vec())).await; + vector.push(S([8u8].to_vec())).await; + + // test `get` + assert_eq!(vector.get(0).await, S([1u8].to_vec())); + assert_eq!(vector.get(1).await, S([3u8].to_vec())); + assert_eq!(vector.get(2).await, S([4u8].to_vec())); + assert_eq!(vector.get(3).await, S([7u8].to_vec())); + assert_eq!(vector.get(4).await, S([8u8].to_vec())); + assert_eq!(vector.len().await, 5); + + // test `get_many` + assert_eq!( + vector.get_many(&[0, 2, 3]).await, + vec![ + vector.get(0).await, + vector.get(2).await, + vector.get(3).await + ] + ); + assert_eq!( + vector.get_many(&[2, 3, 0]).await, + vec![ + vector.get(2).await, + vector.get(3).await, + vector.get(0).await + ] + ); + assert_eq!( + vector.get_many(&[3, 0, 2]).await, + vec![ + vector.get(3).await, + vector.get(0).await, + vector.get(2).await + ] + ); + assert_eq!( + vector.get_many(&[0, 1, 2, 3, 4]).await, + vec![ + vector.get(0).await, + vector.get(1).await, + vector.get(2).await, + vector.get(3).await, + vector.get(4).await, + ] + ); + assert_eq!(vector.get_many(&[]).await, vec![]); + assert_eq!(vector.get_many(&[3]).await, vec![vector.get(3).await]); + + // We allow `get_many` to take repeated indices. 
+ assert_eq!(vector.get_many(&[3; 0]).await, vec![vector.get(3).await; 0]); + assert_eq!(vector.get_many(&[3; 1]).await, vec![vector.get(3).await; 1]); + assert_eq!(vector.get_many(&[3; 2]).await, vec![vector.get(3).await; 2]); + assert_eq!(vector.get_many(&[3; 3]).await, vec![vector.get(3).await; 3]); + assert_eq!(vector.get_many(&[3; 4]).await, vec![vector.get(3).await; 4]); + assert_eq!(vector.get_many(&[3; 5]).await, vec![vector.get(3).await; 5]); + assert_eq!( + vector.get_many(&[3, 3, 2, 3]).await, + vec![ + vector.get(3).await, + vector.get(3).await, + vector.get(2).await, + vector.get(3).await + ] + ); + + // at this point, `vector` should contain: + let expect_values = vec![ + S([1u8].to_vec()), + S([3u8].to_vec()), + S([4u8].to_vec()), + S([7u8].to_vec()), + S([8u8].to_vec()), + ]; + + // test `get_all` + assert_eq!( + expect_values, + vector.get_all().await, + "`get_all` must return expected values" + ); + + // test roundtrip through `set_all`, `get_all` + let values_tmp = vec![ + S([2u8].to_vec()), + S([4u8].to_vec()), + S([6u8].to_vec()), + S([8u8].to_vec()), + S([9u8].to_vec()), + ]; + vector.set_all(values_tmp.clone()).await; + + assert_eq!( + values_tmp, + vector.get_all().await, + "`get_all` must return values passed to `set_all`", + ); + + vector.set_all(expect_values.clone()).await; + + // persist + rusty_storage.persist().await; + + // test `get_all` after persist + assert_eq!( + expect_values, + vector.get_all().await, + "`get_all` must return expected values after persist" + ); + + // modify + let last = vector.pop().await.unwrap(); + + // test + assert_eq!(last, S([8u8].to_vec())); + + // drop without persisting + drop(rusty_storage); // <--- DB ref dropped. + drop(vector); // <--- Final DB ref dropped. NeptuneLevelDb closes + + // Open existing database. + let new_db = NeptuneLevelDb::open_test_database(&db_path, true, None, None, None) + .await + .unwrap(); + + let mut new_rusty_storage = SimpleRustyStorage::new(new_db); + let mut new_vector = new_rusty_storage.schema.new_vec::("test-vector").await; + + // modify + new_vector.set(2, S([3u8].to_vec())).await; + + let last_again = new_vector.pop().await.unwrap(); + assert_eq!(last_again, S([8u8].to_vec())); + + // test + assert_eq!(new_vector.get(0).await, S([1u8].to_vec())); + assert_eq!(new_vector.get(1).await, S([3u8].to_vec())); + assert_eq!(new_vector.get(2).await, S([3u8].to_vec())); + assert_eq!(new_vector.get(3).await, S([7u8].to_vec())); + assert_eq!(new_vector.len().await, 4); + + // test `get_many`, ensure that output matches input ordering + assert_eq!( + new_vector.get_many(&[2]).await, + vec![new_vector.get(2).await] + ); + assert_eq!( + new_vector.get_many(&[3, 1, 0]).await, + vec![ + new_vector.get(3).await, + new_vector.get(1).await, + new_vector.get(0).await + ] + ); + assert_eq!( + new_vector.get_many(&[0, 2, 3]).await, + vec![ + new_vector.get(0).await, + new_vector.get(2).await, + new_vector.get(3).await + ] + ); + assert_eq!( + new_vector.get_many(&[0, 1, 2, 3]).await, + vec![ + new_vector.get(0).await, + new_vector.get(1).await, + new_vector.get(2).await, + new_vector.get(3).await, + ] + ); + assert_eq!(new_vector.get_many(&[]).await, vec![]); + assert_eq!( + new_vector.get_many(&[3]).await, + vec![new_vector.get(3).await] + ); + + // We allow `get_many` to take repeated indices. 
+ assert_eq!( + new_vector.get_many(&[3; 0]).await, + vec![new_vector.get(3).await; 0] + ); + assert_eq!( + new_vector.get_many(&[3; 1]).await, + vec![new_vector.get(3).await; 1] + ); + assert_eq!( + new_vector.get_many(&[3; 2]).await, + vec![new_vector.get(3).await; 2] + ); + assert_eq!( + new_vector.get_many(&[3; 3]).await, + vec![new_vector.get(3).await; 3] + ); + assert_eq!( + new_vector.get_many(&[3; 4]).await, + vec![new_vector.get(3).await; 4] + ); + assert_eq!( + new_vector.get_many(&[3; 5]).await, + vec![new_vector.get(3).await; 5] + ); + + // test `get_all` + assert_eq!( + vec![ + S([1u8].to_vec()), + S([3u8].to_vec()), + S([3u8].to_vec()), + S([7u8].to_vec()), + ], + new_vector.get_all().await, + "`get_all` must return expected values" + ); + + new_vector.set(1, S([130u8].to_vec())).await; + assert_eq!( + vec![ + S([1u8].to_vec()), + S([130u8].to_vec()), + S([3u8].to_vec()), + S([7u8].to_vec()), + ], + new_vector.get_all().await, + "`get_all` must return expected values, after mutation" + ); + } + + #[tokio::test] + async fn test_dbtcvecs_get_many() { + let db = NeptuneLevelDb::open_new_test_database(true, None, None, None) + .await + .unwrap(); + + let mut rusty_storage = SimpleRustyStorage::new(db); + let mut vector = rusty_storage.schema.new_vec::("test-vector").await; + + // populate + const TEST_LIST_LENGTH: u8 = 105; + for i in 0u8..TEST_LIST_LENGTH { + vector.push(S(vec![i, i, i])).await; + } + + let read_indices: Vec = random_elements::(30) + .into_iter() + .map(|x| x % TEST_LIST_LENGTH as u64) + .collect(); + let values = vector.get_many(&read_indices).await; + assert!(read_indices + .iter() + .zip(values) + .all(|(index, value)| value == S(vec![*index as u8, *index as u8, *index as u8]))); + + // Mutate some indices + let mutate_indices: Vec = random_elements::(30) + .into_iter() + .map(|x| x % TEST_LIST_LENGTH as u64) + .collect(); + for index in mutate_indices.iter() { + vector + .set( + *index, + S(vec![*index as u8 + 1, *index as u8 + 1, *index as u8 + 1]), + ) + .await + } + + let new_values = vector.get_many(&read_indices).await; + for (value, index) in new_values.into_iter().zip(read_indices) { + if mutate_indices.contains(&index) { + assert_eq!( + S(vec![index as u8 + 1, index as u8 + 1, index as u8 + 1]), + value + ) + } else { + assert_eq!(S(vec![index as u8, index as u8, index as u8]), value) + } + } + } + + #[tokio::test] + async fn test_dbtcvecs_set_many_get_many() { + let db = NeptuneLevelDb::open_new_test_database(true, None, None, None) + .await + .unwrap(); + + // initialize storage + let mut rusty_storage = SimpleRustyStorage::new(db); + let mut vector = rusty_storage.schema.new_vec::("test-vector").await; + + // Generate initial index/value pairs. + const TEST_LIST_LENGTH: u8 = 105; + let init_keyvals: Vec<(Index, S)> = (0u8..TEST_LIST_LENGTH) + .map(|i| (i as Index, S(vec![i, i, i]))) + .collect(); + + // set_many() does not grow the list, so we must first push + // some empty elems, to desired length. 
+ for _ in 0u8..TEST_LIST_LENGTH { + vector.push(S(vec![])).await; + } + + // set the initial values + vector.set_many(init_keyvals).await; + + // generate some random indices to read + let read_indices: Vec = random_elements::(30) + .into_iter() + .map(|x| x % TEST_LIST_LENGTH as u64) + .collect(); + + // perform read, and validate as expected + let values = vector.get_many(&read_indices).await; + assert!(read_indices + .iter() + .zip(values) + .all(|(index, value)| value == S(vec![*index as u8, *index as u8, *index as u8]))); + + // Generate some random indices for mutation + let mutate_indices: Vec = random_elements::(30) + .iter() + .map(|x| x % TEST_LIST_LENGTH as u64) + .collect(); + + // Generate keyvals for mutation + let mutate_keyvals: Vec<(Index, S)> = mutate_indices + .iter() + .map(|index| { + let val = (index % TEST_LIST_LENGTH as u64 + 1) as u8; + (*index, S(vec![val, val, val])) + }) + .collect(); + + // Mutate values at randomly generated indices + vector.set_many(mutate_keyvals).await; + + // Verify mutated values, and non-mutated also. + let new_values = vector.get_many(&read_indices).await; + for (value, index) in new_values.into_iter().zip(read_indices.clone()) { + if mutate_indices.contains(&index) { + assert_eq!( + S(vec![index as u8 + 1, index as u8 + 1, index as u8 + 1]), + value + ) + } else { + assert_eq!(S(vec![index as u8, index as u8, index as u8]), value) + } + } + + // Persist and verify that result is unchanged + rusty_storage.persist().await; + let new_values_after_persist = vector.get_many(&read_indices).await; + for (value, index) in new_values_after_persist.into_iter().zip(read_indices) { + if mutate_indices.contains(&index) { + assert_eq!( + S(vec![index as u8 + 1, index as u8 + 1, index as u8 + 1]), + value + ) + } else { + assert_eq!(S(vec![index as u8, index as u8, index as u8]), value) + } + } + } + + #[tokio::test] + async fn test_dbtcvecs_set_all_get_many() { + let db = NeptuneLevelDb::open_new_test_database(true, None, None, None) + .await + .unwrap(); + + // initialize storage + let mut rusty_storage = SimpleRustyStorage::new(db); + let mut vector = rusty_storage.schema.new_vec::("test-vector").await; + + // Generate initial index/value pairs. + const TEST_LIST_LENGTH: u8 = 105; + let init_vals: Vec = (0u8..TEST_LIST_LENGTH) + .map(|i| (S(vec![i, i, i]))) + .collect(); + + let mut mutate_vals = init_vals.clone(); // for later + + // set_all() does not grow the list, so we must first push + // some empty elems, to desired length. + for _ in 0u8..TEST_LIST_LENGTH { + vector.push(S(vec![])).await; + } + + // set the initial values + vector.set_all(init_vals).await; + + // generate some random indices to read + let read_indices: Vec = random_elements::(30) + .into_iter() + .map(|x| x % TEST_LIST_LENGTH as u64) + .collect(); + + // perform read, and validate as expected + let values = vector.get_many(&read_indices).await; + assert!(read_indices + .iter() + .zip(values) + .all(|(index, value)| value == S(vec![*index as u8, *index as u8, *index as u8]))); + + // Generate some random indices for mutation + let mutate_indices: Vec = random_elements::(30) + .iter() + .map(|x| x % TEST_LIST_LENGTH as u64) + .collect(); + + // Generate vals for mutation + for index in mutate_indices.iter() { + let val = (index % TEST_LIST_LENGTH as u64 + 1) as u8; + mutate_vals[*index as usize] = S(vec![val, val, val]); + } + + // Mutate values at randomly generated indices + vector.set_all(mutate_vals).await; + + // Verify mutated values, and non-mutated also. 
+ let new_values = vector.get_many(&read_indices).await; + for (value, index) in new_values.into_iter().zip(read_indices) { + if mutate_indices.contains(&index) { + assert_eq!( + S(vec![index as u8 + 1, index as u8 + 1, index as u8 + 1]), + value + ) + } else { + assert_eq!(S(vec![index as u8, index as u8, index as u8]), value) + } + } + } + + #[tokio::test] + async fn storage_schema_vector_pbt() { + let db = NeptuneLevelDb::open_new_test_database(true, None, None, None) + .await + .unwrap(); + + let mut rusty_storage = SimpleRustyStorage::new(db); + let mut persisted_vector = rusty_storage.schema.new_vec::("test-vector").await; + + // Insert 1000 elements + let mut rng = rand::thread_rng(); + let mut normal_vector = vec![]; + for _ in 0..1000 { + let value = random(); + normal_vector.push(value); + persisted_vector.push(value).await; + } + rusty_storage.persist().await; + + for _i in 0..1000 { + assert_eq!(normal_vector.len() as u64, persisted_vector.len().await); + + match rng.gen_range(0..=5) { + 0 => { + // `push` + let push_val = rng.next_u64(); + persisted_vector.push(push_val).await; + normal_vector.push(push_val); + } + 1 => { + // `pop` + let normal_pop_val = normal_vector.pop().unwrap(); + let persisted_pop_val = persisted_vector.pop().await.unwrap(); + assert_eq!(persisted_pop_val, normal_pop_val); + } + 2 => { + // `get_many` + assert_eq!(normal_vector.len(), persisted_vector.len().await as usize); + + let index = rng.gen_range(0..normal_vector.len()); + assert_eq!(Vec::::default(), persisted_vector.get_many(&[]).await); + assert_eq!( + normal_vector[index], + persisted_vector.get(index as u64).await + ); + assert_eq!( + vec![normal_vector[index]], + persisted_vector.get_many(&[index as u64]).await + ); + assert_eq!( + vec![normal_vector[index], normal_vector[index]], + persisted_vector + .get_many(&[index as u64, index as u64]) + .await + ); + } + 3 => { + // `set` + let value = rng.next_u64(); + let index = rng.gen_range(0..normal_vector.len()); + normal_vector[index] = value; + persisted_vector.set(index as u64, value).await; + } + 4 => { + // `set_many` + let indices: Vec = (0..rng.gen_range(0..10)) + .map(|_| rng.gen_range(0..normal_vector.len() as u64)) + .unique() + .collect(); + let values: Vec = (0..indices.len()).map(|_| rng.next_u64()).collect_vec(); + let update: Vec<(u64, u64)> = + indices.into_iter().zip_eq(values.into_iter()).collect(); + for (key, val) in update.iter() { + normal_vector[*key as usize] = *val; + } + persisted_vector.set_many(update).await; + } + 5 => { + // persist + rusty_storage.persist().await; + } + _ => unreachable!(), + } + } + + // Check equality after above loop + assert_eq!(normal_vector.len(), persisted_vector.len().await as usize); + for (i, nvi) in normal_vector.iter().enumerate() { + assert_eq!(*nvi, persisted_vector.get(i as u64).await); + } + + // Check equality using `get_many` + assert_eq!( + normal_vector, + persisted_vector + .get_many(&(0..normal_vector.len() as u64).collect_vec()) + .await + ); + + // Check equality after persisting updates + rusty_storage.persist().await; + assert_eq!(normal_vector.len(), persisted_vector.len().await as usize); + for (i, nvi) in normal_vector.iter().enumerate() { + assert_eq!(*nvi, persisted_vector.get(i as u64).await); + } + + // Check equality using `get_many` + assert_eq!( + normal_vector, + persisted_vector + .get_many(&(0..normal_vector.len() as u64).collect_vec()) + .await + ); + } + + #[tokio::test] + async fn singleton_vector_key_collission() { + let db = 
NeptuneLevelDb::open_new_test_database(false, None, None, None) + .await + .unwrap(); + let db_path = db.path().clone(); + let mut rusty_storage = SimpleRustyStorage::new(db); + let vector1 = rusty_storage.schema.new_vec::("test-vector1").await; + let mut singleton = rusty_storage + .schema + .new_singleton::("singleton-1".to_owned()) + .await; + + // initialize + assert!(vector1.is_empty().await); + singleton.set(1776u64).await; + assert!(vector1.is_empty().await); + rusty_storage.persist().await; + assert!(vector1.is_empty().await); + + drop(rusty_storage); // <-- DB ref dropped + drop(vector1); // <-- DB ref dropped + drop(singleton); // <-- final DB ref dropped (NeptuneLevelDb closes) + + // re-open NeptuneLevelDb / restore from disk + let new_db = NeptuneLevelDb::open_test_database(&db_path, true, None, None, None) + .await + .unwrap(); + let mut new_rusty_storage = SimpleRustyStorage::new(new_db); + let new_vector1 = new_rusty_storage.schema.new_vec::("test-vector1").await; + assert!(new_vector1.is_empty().await); + } + + #[tokio::test] + async fn test_two_vectors_and_singleton() { + let singleton_value = S([3u8, 3u8, 3u8, 1u8].to_vec()); + + // Open new database that will not be destroyed on close. + let db = NeptuneLevelDb::open_new_test_database(false, None, None, None) + .await + .unwrap(); + let db_path = db.path().clone(); + + let mut rusty_storage = SimpleRustyStorage::new(db); + let mut vector1 = rusty_storage.schema.new_vec::("test-vector1").await; + let mut vector2 = rusty_storage.schema.new_vec::("test-vector2").await; + let mut singleton = rusty_storage + .schema + .new_singleton::("singleton".to_owned()) + .await; + + assert!( + vector1.get_all().await.is_empty(), + "`get_all` call to unpopulated persistent vector must return empty vector" + ); + assert!( + vector2.get_all().await.is_empty(), + "`get_all` call to unpopulated persistent vector must return empty vector" + ); + + // populate 1 + vector1.push(S([1u8].to_vec())).await; + vector1.push(S([30u8].to_vec())).await; + vector1.push(S([4u8].to_vec())).await; + vector1.push(S([7u8].to_vec())).await; + vector1.push(S([8u8].to_vec())).await; + + // populate 2 + vector2.push(S([1u8].to_vec())).await; + vector2.push(S([3u8].to_vec())).await; + vector2.push(S([3u8].to_vec())).await; + vector2.push(S([7u8].to_vec())).await; + + // set singleton + singleton.set(singleton_value.clone()).await; + + // modify 1 + vector1.set(0, S([8u8].to_vec())).await; + + // test + assert_eq!(vector1.get(0).await, S([8u8].to_vec())); + assert_eq!(vector1.get(1).await, S([30u8].to_vec())); + assert_eq!(vector1.get(2).await, S([4u8].to_vec())); + assert_eq!(vector1.get(3).await, S([7u8].to_vec())); + assert_eq!(vector1.get(4).await, S([8u8].to_vec())); + assert_eq!( + vector1.get_many(&[2, 0, 3]).await, + vec![ + vector1.get(2).await, + vector1.get(0).await, + vector1.get(3).await + ] + ); + assert_eq!( + vector1.get_many(&[2, 3, 1]).await, + vec![ + vector1.get(2).await, + vector1.get(3).await, + vector1.get(1).await + ] + ); + assert_eq!(vector1.len().await, 5); + assert_eq!(vector2.get(0).await, S([1u8].to_vec())); + assert_eq!(vector2.get(1).await, S([3u8].to_vec())); + assert_eq!(vector2.get(2).await, S([3u8].to_vec())); + assert_eq!(vector2.get(3).await, S([7u8].to_vec())); + assert_eq!( + vector2.get_many(&[0, 1, 2]).await, + vec![ + vector2.get(0).await, + vector2.get(1).await, + vector2.get(2).await + ] + ); + assert_eq!(vector2.get_many(&[]).await, vec![]); + assert_eq!( + vector2.get_many(&[1, 2]).await, + vec![vector2.get(1).await, 
vector2.get(2).await] + ); + assert_eq!( + vector2.get_many(&[2, 1]).await, + vec![vector2.get(2).await, vector2.get(1).await] + ); + assert_eq!(vector2.len().await, 4); + assert_eq!(singleton.get().await, singleton_value); + assert_eq!( + vec![ + S([8u8].to_vec()), + S([30u8].to_vec()), + S([4u8].to_vec()), + S([7u8].to_vec()), + S([8u8].to_vec()) + ], + vector1.get_all().await + ); + assert_eq!( + vec![ + S([1u8].to_vec()), + S([3u8].to_vec()), + S([3u8].to_vec()), + S([7u8].to_vec()), + ], + vector2.get_all().await + ); + + // persist and drop + rusty_storage.persist().await; + assert_eq!( + vector2.get_many(&[2, 1]).await, + vec![vector2.get(2).await, vector2.get(1).await] + ); + drop(rusty_storage); // <-- DB ref dropped + drop(vector1); // <-- DB ref dropped + drop(vector2); // <-- DB ref dropped + drop(singleton); // <-- final DB ref dropped (NeptuneLevelDb closes) + + // re-open NeptuneLevelDb / restore from disk + let new_db = NeptuneLevelDb::open_test_database(&db_path, true, None, None, None) + .await + .unwrap(); + let mut new_rusty_storage = SimpleRustyStorage::new(new_db); + let new_vector1 = new_rusty_storage.schema.new_vec::("test-vector1").await; + let mut new_vector2 = new_rusty_storage.schema.new_vec::("test-vector2").await; + + let new_singleton = new_rusty_storage + .schema + .new_singleton::("singleton".to_owned()) + .await; + + // test again + assert_eq!(new_vector1.get(0).await, S([8u8].to_vec())); + assert_eq!(new_vector1.get(1).await, S([30u8].to_vec())); + assert_eq!(new_vector1.get(2).await, S([4u8].to_vec())); + assert_eq!(new_vector1.get(3).await, S([7u8].to_vec())); + assert_eq!(new_vector1.get(4).await, S([8u8].to_vec())); + assert_eq!(new_vector1.len().await, 5); + assert_eq!(new_vector2.get(0).await, S([1u8].to_vec())); + assert_eq!(new_vector2.get(1).await, S([3u8].to_vec())); + assert_eq!(new_vector2.get(2).await, S([3u8].to_vec())); + assert_eq!(new_vector2.get(3).await, S([7u8].to_vec())); + assert_eq!(new_vector2.len().await, 4); + assert_eq!(new_singleton.get().await, singleton_value); + + // Test `get_many` for a restored NeptuneLevelDb + assert_eq!( + new_vector2.get_many(&[2, 1]).await, + vec![new_vector2.get(2).await, new_vector2.get(1).await] + ); + assert_eq!( + new_vector2.get_many(&[0, 1]).await, + vec![new_vector2.get(0).await, new_vector2.get(1).await] + ); + assert_eq!( + new_vector2.get_many(&[1, 0]).await, + vec![new_vector2.get(1).await, new_vector2.get(0).await] + ); + assert_eq!( + new_vector2.get_many(&[0, 1, 2, 3]).await, + vec![ + new_vector2.get(0).await, + new_vector2.get(1).await, + new_vector2.get(2).await, + new_vector2.get(3).await, + ] + ); + assert_eq!( + new_vector2.get_many(&[2]).await, + vec![new_vector2.get(2).await,] + ); + assert_eq!(new_vector2.get_many(&[]).await, vec![]); + + // Test `get_all` for a restored NeptuneLevelDb + assert_eq!( + vec![ + S([1u8].to_vec()), + S([3u8].to_vec()), + S([3u8].to_vec()), + S([7u8].to_vec()), + ], + new_vector2.get_all().await, + "`get_all` must return expected values, before mutation" + ); + new_vector2.set(1, S([130u8].to_vec())).await; + assert_eq!( + vec![ + S([1u8].to_vec()), + S([130u8].to_vec()), + S([3u8].to_vec()), + S([7u8].to_vec()), + ], + new_vector2.get_all().await, + "`get_all` must return expected values, after mutation" + ); + } + + #[should_panic( + expected = "Out-of-bounds. Got 2 but length was 2. 
persisted vector name: test-vector" + )] + #[tokio::test] + async fn out_of_bounds_using_get() { + let db = NeptuneLevelDb::open_new_test_database(true, None, None, None) + .await + .unwrap(); + + let mut rusty_storage = SimpleRustyStorage::new(db); + let mut vector = rusty_storage.schema.new_vec::("test-vector").await; + + vector.push(1).await; + vector.push(1).await; + vector.get(2).await; + } + + #[should_panic( + expected = "Out-of-bounds. Got index 2 but length was 2. persisted vector name: test-vector" + )] + #[tokio::test] + async fn out_of_bounds_using_get_many() { + let db = NeptuneLevelDb::open_new_test_database(true, None, None, None) + .await + .unwrap(); + + let mut rusty_storage = SimpleRustyStorage::new(db); + let mut vector = rusty_storage.schema.new_vec::("test-vector").await; + + vector.push(1).await; + vector.push(1).await; + vector.get_many(&[0, 0, 0, 1, 1, 2]).await; + } + + #[should_panic( + expected = "Out-of-bounds. Got 1 but length was 1. persisted vector name: test-vector" + )] + #[tokio::test] + async fn out_of_bounds_using_set_many() { + let db = NeptuneLevelDb::open_new_test_database(true, None, None, None) + .await + .unwrap(); + + let mut rusty_storage = SimpleRustyStorage::new(db); + let mut vector = rusty_storage.schema.new_vec::("test-vector").await; + + vector.push(1).await; + + // attempt to set 2 values, when only one is in vector. + vector.set_many([(0, 0), (1, 1)]).await; + } + + #[should_panic(expected = "size-mismatch. input has 2 elements and target has 1 elements")] + #[tokio::test] + async fn size_mismatch_too_many_using_set_all() { + let db = NeptuneLevelDb::open_new_test_database(true, None, None, None) + .await + .unwrap(); + + let mut rusty_storage = SimpleRustyStorage::new(db); + let mut vector = rusty_storage.schema.new_vec::("test-vector").await; + + vector.push(1).await; + + // attempt to set 2 values, when only one is in vector. + vector.set_all([0, 1]).await; + } + + #[should_panic(expected = "size-mismatch. input has 1 elements and target has 2 elements")] + #[tokio::test] + async fn size_mismatch_too_few_using_set_all() { + let db = NeptuneLevelDb::open_new_test_database(true, None, None, None) + .await + .unwrap(); + + let mut rusty_storage = SimpleRustyStorage::new(db); + let mut vector = rusty_storage.schema.new_vec::("test-vector").await; + + vector.push(0).await; + vector.push(1).await; + + // attempt to set 1 values, when two are in vector. + vector.set_all([5]).await; + } + + #[tokio::test] + async fn test_db_sync_and_send() { + fn sync_and_send(_t: T) {} + + // open new NeptuneLevelDb that will not be dropped on close. + let db: NeptuneLevelDb = + NeptuneLevelDb::open_new_test_database(false, None, None, None) + .await + .unwrap(); + sync_and_send(db); + } +} diff --git a/src/database/storage/storage_schema/pending_writes.rs b/src/database/storage/storage_schema/pending_writes.rs new file mode 100644 index 00000000..c2004a84 --- /dev/null +++ b/src/database/storage/storage_schema/pending_writes.rs @@ -0,0 +1,11 @@ +use super::enums::WriteOperation; + +/// Represents pending database write operations +#[derive(Debug, Clone, Default)] +pub(super) struct PendingWrites { + /// list of write ops, newest at end. cleared once persisted to DB. + pub(super) write_ops: Vec, + + /// increments each time write ops are persisted to DB. 
+ pub(super) persist_count: usize, +} diff --git a/src/database/storage/storage_schema/rusty_key.rs b/src/database/storage/storage_schema/rusty_key.rs new file mode 100644 index 00000000..71460c77 --- /dev/null +++ b/src/database/storage/storage_schema/rusty_key.rs @@ -0,0 +1,55 @@ +use leveldb::database::key::IntoLevelDBKey; +use leveldb::error::Error; +use serde::{Deserialize, Serialize}; + +// Todo: consider making RustyKey a newtype for RustyValue and auto derive all its From impls +// using either `derive_more` or `newtype_derive` crate + +/// Represents a database key as bytes +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct RustyKey(pub Vec); + +impl From for RustyKey { + #[inline] + fn from(value: u8) -> Self { + Self([value].to_vec()) + } +} +impl From<(RustyKey, RustyKey)> for RustyKey { + #[inline] + fn from(value: (RustyKey, RustyKey)) -> Self { + let v0 = value.0 .0; + let v1 = value.1 .0; + RustyKey([v0, v1].concat()) + } +} +impl From for RustyKey { + #[inline] + fn from(value: u64) -> Self { + RustyKey(value.to_be_bytes().to_vec()) + } +} + +impl IntoLevelDBKey for RustyKey { + fn as_u8_slice_for_write(&self, f: &dyn Fn(&[u8]) -> Result<(), Error>) -> Result<(), Error> { + f(&self.0) + } + + fn as_u8_slice_for_get( + &self, + f: &dyn Fn(&[u8]) -> Result>, Error>, + ) -> Result>, Error> { + f(&self.0) + } +} + +impl From<&dyn IntoLevelDBKey> for RustyKey { + #[inline] + fn from(value: &dyn IntoLevelDBKey) -> Self { + let vec_u8 = value + .as_u8_slice_for_get(&|k| Ok(Some(k.to_vec()))) + .unwrap() + .unwrap(); + Self(vec_u8) + } +} diff --git a/src/database/storage/storage_schema/rusty_reader.rs b/src/database/storage/storage_schema/rusty_reader.rs new file mode 100644 index 00000000..a469ed0a --- /dev/null +++ b/src/database/storage/storage_schema/rusty_reader.rs @@ -0,0 +1,24 @@ +use super::super::super::neptune_leveldb::NeptuneLevelDb; +use super::{traits::StorageReader, RustyKey, RustyValue}; + +// Note: RustyReader and SimpleRustyReader appear to be exactly +// the same. Can we remove one of them? + +/// A read-only database interface +#[derive(Debug, Clone)] +pub struct RustyReader { + /// LevelDB Database + pub db: NeptuneLevelDb, +} + +impl StorageReader for RustyReader { + #[inline] + async fn get(&self, key: RustyKey) -> Option { + self.db.get(key).await + } + + #[inline] + async fn get_many(&self, keys: impl IntoIterator) -> Vec> { + futures::future::join_all(keys.into_iter().map(|key| self.db.get(key))).await + } +} diff --git a/src/database/storage/storage_schema/rusty_value.rs b/src/database/storage/storage_schema/rusty_value.rs new file mode 100644 index 00000000..49d4c0bc --- /dev/null +++ b/src/database/storage/storage_schema/rusty_value.rs @@ -0,0 +1,132 @@ +// RustyValue is an interface for serializing data as bytes for storing +// in the database. See type description below. +// +// RustyValue now makes use of `bincode` for serializing data. +// Any rust `serializer` crate that understands serde +// Serialize and Deserialize traits could be plugged in instead. +// +// Here is a good comparison of rust serializers and their performance/size. +// https://github.com/djkoloski/rust_serialization_benchmark +// See also this blog: +// https://david.kolo.ski/blog/rkyv-is-faster-than/ +// +// An important consideration is that many of these crates are not +// compatible with `serde`. of those that are, `bincode` and `postcard` +// appear to be the fastest options, and are pretty close. 
+// `postcard` seems to create smaller serialized data size though,
+// so that could be a good reason to use it for a large DB like
+// a blockchain. `ron` appears too slow to be a good option.
+//
+// With regards to blockchain size, we should also consider
+// compressing our serialized data with something like zlib.
+// In the above benchmarks, all libraries are compared with
+// both uncompressed size and zlib compressed size.
+//
+// The fn that does the compression in the benchmark is:
+// https://github.com/djkoloski/rust_serialization_benchmark/blob/e1f6a31a431d5e8c3889525696231acbb691cbd9/src/lib.rs#L169
+//
+// For RustyValue, we would do the compression/decompression in the
+// `serialize` and `deserialize` functions.
+//
+// Before making final decision(s), we should benchmark our
+// code with real data and try out different crates/options.
+//
+// todo: consider moving the serialization functions, or perhaps all
+// of rusty_value.rs to a top level module, eg twenty-first::serialization.
+
+use serde::{de::DeserializeOwned, Deserialize, Serialize};
+
+use std::fmt::Debug;
+
+/// Represents a database value as bytes and provides conversions for standard types
+///
+/// It is simple to extend RustyValue for use with any locally defined type
+/// that implements `serde::Serialize` and `serde::Deserialize`.
+///
+/// ## Examples
+///
+/// ```
+/// use serde::{Serialize, Deserialize};
+/// use neptune_core::database::storage::storage_schema::RustyValue;
+///
+/// #[derive(Debug, Clone, Serialize, Deserialize)]
+/// pub struct Person {
+///     name: String,
+///     age: u16,
+/// }
+///
+/// impl From<RustyValue> for Person {
+///     fn from(value: RustyValue) -> Self {
+///         value.into_any()
+///     }
+/// }
+///
+/// impl From<Person> for RustyValue {
+///     fn from(value: Person) -> Self {
+///         Self::from_any(&value)
+///     }
+/// }
+/// ```
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct RustyValue(pub Vec<u8>);
+
+impl RustyValue {
+    /// serialize a value `T` that implements `serde::Serialize` into `RustyValue`
+    ///
+    /// This provides the serialization for `RustyValue` in the database
+    /// (or anywhere else it is stored)
+    ///
+    /// At present `bincode` is used for de/serialization, but this could
+    /// change in the future. No guarantee is made as to the serialization format.
+    #[inline]
+    pub fn from_any<T: Serialize>(value: &T) -> Self {
+        Self(serialize(value))
+    }
+    /// Deserialize `RustyValue` into a value `T` that implements `serde::de::DeserializeOwned`
+    ///
+    /// This provides the deserialization for `RustyValue` from the database
+    /// (or anywhere else it is stored)
+    ///
+    /// At present `bincode` is used for de/serialization, but this could
+    /// change in the future. No guarantee is made as to the serialization format.
+    #[inline]
+    pub fn into_any<T: DeserializeOwned>(&self) -> T {
+        deserialize(&self.0)
+    }
+}
+
+/// serialize a value T that implements serde::Serialize into bytes
+///
+/// At present `bincode` is used for de/serialization, but this could
+/// change in the future. No guarantee is made as to the serialization format.
+///
+// todo: consider compressing serialized bytes with zlib, or similar.
+//       see comments at top of file.
+// todo: consider moving this fn, or perhaps all of rusty_value.rs
+//       to a top level module, eg twenty-first::serialization
+#[inline]
+pub fn serialize<T: Serialize>(value: &T) -> Vec<u8> {
+    bincode::serialize(value).expect("should have serialized T into bytes")
+
+    // for now, we use bincode. but it would be so easy to switch to eg postcard or ron.
+    // ron::to_string(value).unwrap().as_bytes().to_vec()
+    // postcard::to_allocvec(value).unwrap()
+}
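+
+// a hedged sketch of the zlib idea discussed in the comments at the top of
+// this file. note: `flate2` is NOT currently a dependency and
+// `serialize_compressed` is a hypothetical name; this is illustration only,
+// not part of the working code:
+//
+// pub fn serialize_compressed<T: Serialize>(value: &T) -> Vec<u8> {
+//     use flate2::{write::ZlibEncoder, Compression};
+//     use std::io::Write;
+//     let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
+//     encoder
+//         .write_all(&serialize(value))
+//         .expect("in-memory write should succeed");
+//     encoder.finish().expect("zlib compression should succeed")
+// }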
+/// deserialize bytes into a value T that implements serde::de::DeserializeOwned
+///
+/// At present `bincode` is used for de/serialization, but this could
+/// change in the future. No guarantee is made as to the serialization format.
+///
+// todo: consider decompressing serialized bytes with zlib, or similar.
+//       see comments at top of file.
+// todo: consider moving this fn, or perhaps all of rusty_value.rs
+//       to a top level module, eg twenty-first::serialization
+#[inline]
+pub fn deserialize<T: DeserializeOwned>(bytes: &[u8]) -> T {
+    bincode::deserialize(bytes).expect("should have deserialized bytes")
+
+    // for now, we use bincode. but it would be so easy to switch to eg postcard or ron.
+    // ron::from_str(String::from_utf8(bytes.to_vec()).unwrap().as_str()).unwrap()
+    // postcard::from_bytes(bytes).unwrap()
+}
diff --git a/src/database/storage/storage_schema/schema.rs b/src/database/storage/storage_schema/schema.rs
new file mode 100644
index 00000000..0e3fb1b8
--- /dev/null
+++ b/src/database/storage/storage_schema/schema.rs
@@ -0,0 +1,183 @@
+use super::{traits::*, DbtSingleton, DbtVec, PendingWrites, SimpleRustyReader};
+use crate::locks::tokio::{AtomicRw, LockCallbackFn};
+use serde::{de::DeserializeOwned, Serialize};
+use std::{fmt::Display, sync::Arc};
+
+/// Provides a virtual database schema.
+///
+/// `DbtSchema` can create any number of instances of types that
+/// implement the trait [`DbTable`]. We refer to these instances as
+/// `table`. Examples are [`DbtVec`] and [`DbtSingleton`].
+///
+/// With proper usage (below), the application can perform writes
+/// to any subset of the `table`s and then persist (write) the data
+/// atomically to the database.
+///
+/// Thus we get something like relational database transactions using the
+/// `LevelDB` key/val store.
+///
+/// Important! Operations over multiple `table`s are NOT atomic
+/// without additional locking by the application.
+///
+/// This can be achieved by placing the `table`s into a heterogeneous
+/// container such as a `struct` or `tuple`. Then place an
+/// `Arc<Mutex<..>>` or `Arc<RwLock<..>>` around the container.
+///
+/// # Example:
+///
+/// ```compile_fail
+/// # // note: compile_fail due to: https://github.com/rust-lang/rust/issues/67295
+/// # tokio_test::block_on(async {
+/// # use database::storage::{storage_vec::traits::*, storage_schema::{SimpleRustyStorage, traits::*}};
+/// # let db = database::NeptuneLevelDb::open_new_test_database(true, None, None, None).await.unwrap();
+/// use std::sync::Arc;
+/// use tokio::sync::RwLock;
+/// let mut storage = SimpleRustyStorage::new(db);
+///
+/// let tables = (
+///     storage.schema.new_vec::<u16>("ages").await,
+///     storage.schema.new_vec::<String>("names").await,
+///     storage.schema.new_singleton::<bool>("proceed").await
+/// );
+///
+/// let mut atomic_tables = Arc::new(RwLock::new(tables));
+///
+/// // these mutations happen atomically in mem.
+/// {
+///     let mut lock = atomic_tables.write().await;
+///     lock.0.push(5).await;
+///     lock.1.push("Sally".into()).await;
+///     lock.2.set(true).await;
+/// }
+///
+/// // all pending writes are persisted to DB in one atomic batch operation.
+/// storage.persist();
+/// # });
+/// ```
+///
+/// In the example, the `table`s were placed in a `tuple` container.
+/// It works equally well to put them in a `struct`. If the tables
+/// are all of the same type (including generics), they could be
+/// placed in a collection type such as `Vec`, or `HashMap`.
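+///
+/// For instance, the same tables in a named `struct` container might look
+/// like this (a sketch only; the field types follow the return types of
+/// `new_vec` / `new_singleton` above):
+///
+/// ```ignore
+/// struct Tables {
+///     ages: DbtVec<u16>,
+///     names: DbtVec<String>,
+///     proceed: DbtSingleton<bool>,
+/// }
+/// ```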
+/// +/// This crate provides [`AtomicRw`] and [`AtomicMutex`] +/// which are simple wrappers around `Arc>` and `Arc>`. +/// `DbtSchema` provides helper methods for wrapping your `table`s with +/// these. +/// +/// This is the recommended usage. +/// +/// # Example: +/// +/// ```compile_fail +/// # // note: compile_fail due to: https://github.com/rust-lang/rust/issues/67295 +/// # tokio_test::block_on(async { +/// # use database::storage::{storage_vec::traits::*, storage_schema::{SimpleRustyStorage, traits::*}}; +/// # let db = database::NeptuneLevelDb::open_new_test_database(true, None, None, None).await.unwrap(); +/// use neptune_core::locks::tokio::AtomicRw; +/// +/// let mut storage = SimpleRustyStorage::new(db); +/// +/// let tables = ( +/// storage.schema.new_vec::("ages").await, +/// storage.schema.new_vec::("names").await, +/// storage.schema.new_singleton::("proceed").await +/// ); +/// +/// let mut atomic_tables = AtomicRw::from(tables); +/// +/// // these mutations happen atomically in mem. +/// { +/// let mut lock = atomic_tables.lock_guard_mut().await; +/// lock.0.push(5).await; +/// lock.1.push("Sally".into()).await; +/// lock.2.set(true).await; +/// } +/// +/// // all pending writes are persisted to DB in one atomic batch operation. +/// storage.persist(); +/// # }); +/// ``` +pub struct DbtSchema { + /// Pending writes for all tables in this Schema. + /// These get written/cleared by StorageWriter::persist() + /// + /// todo: Can we get rid of this lock? + pub(super) pending_writes: AtomicRw, + + /// Database Reader + pub reader: Arc, + + /// If present, the provided callback function will be called + /// whenever a lock is acquired by a `DbTable` instantiated + /// by this `DbtSchema`. See [AtomicRw](crate::sync::AtomicRw) + pub lock_callback_fn: Option, + + /// indicates count of tables in this schema + pub table_count: u8, +} + +impl DbtSchema { + /// Instantiate a `DbtSchema` from a `SimpleRustyReader` and + /// optional `name` and lock acquisition callback. 
+ /// See [AtomicRw](crate::sync::AtomicRw) + pub fn new( + reader: SimpleRustyReader, + name: Option<&str>, + lock_callback_fn: Option, + ) -> Self { + Self { + pending_writes: AtomicRw::from((PendingWrites::default(), name, lock_callback_fn)), + reader: Arc::new(reader), + lock_callback_fn, + table_count: 0, + } + } + + /// Create a new DbtVec + /// + /// All pending write operations of the DbtVec are stored + /// in the schema + #[inline] + pub async fn new_vec(&mut self, name: &str) -> DbtVec + where + V: Clone + 'static, + V: Serialize + DeserializeOwned + Send + Sync, + { + let pending_writes = self.pending_writes.clone(); + let reader = self.reader.clone(); + let key_prefix = self.table_count; + self.table_count += 1; + + let mut vector = DbtVec::::new(pending_writes, reader, key_prefix, name).await; + vector.restore_or_new().await; + + vector + } + + // possible future extension + // fn new_hashmap(&self) -> Arc>> { } + + /// Create a new DbtSingleton + /// + /// All pending write operations of the DbtSingleton are stored + /// in the schema + #[inline] + pub async fn new_singleton(&mut self, name: impl Into + Display) -> DbtSingleton + where + V: Default + Clone + 'static, + V: Serialize + DeserializeOwned + Send + Sync, + { + let key = self.table_count; + self.table_count += 1; + + let mut singleton = DbtSingleton::::new( + key, + self.pending_writes.clone(), + self.reader.clone(), + name.into(), + ); + singleton.restore_or_new().await; + singleton + } +} diff --git a/src/database/storage/storage_schema/simple_rusty_reader.rs b/src/database/storage/storage_schema/simple_rusty_reader.rs new file mode 100644 index 00000000..41b7f207 --- /dev/null +++ b/src/database/storage/storage_schema/simple_rusty_reader.rs @@ -0,0 +1,23 @@ +use super::super::super::neptune_leveldb::NeptuneLevelDb; +use super::{traits::StorageReader, RustyKey, RustyValue}; + +// Note: RustyReader and SimpleRustyReader appear to be exactly +// the same. Can we remove one of them? + +/// A read-only database interface +#[derive(Debug, Clone)] +pub struct SimpleRustyReader { + pub(super) db: NeptuneLevelDb, +} + +impl StorageReader for SimpleRustyReader { + #[inline] + async fn get(&self, key: RustyKey) -> Option { + self.db.get(key).await + } + + #[inline] + async fn get_many(&self, keys: impl IntoIterator) -> Vec> { + futures::future::join_all(keys.into_iter().map(|key| self.db.get(key))).await + } +} diff --git a/src/database/storage/storage_schema/simple_rusty_storage.rs b/src/database/storage/storage_schema/simple_rusty_storage.rs new file mode 100644 index 00000000..4bea5138 --- /dev/null +++ b/src/database/storage/storage_schema/simple_rusty_storage.rs @@ -0,0 +1,62 @@ +use super::super::super::neptune_leveldb::NeptuneLevelDb; +use super::{traits::StorageWriter, DbtSchema, SimpleRustyReader, WriteOperation}; +use super::{RustyKey, RustyValue}; +use crate::database::neptune_leveldb::WriteBatchAsync; +use crate::locks::tokio::LockCallbackFn; + +/// Database schema and tables logic for RustyLevelDB. You probably +/// want to implement your own storage class after this example so +/// that you can hardcode the schema in new(). But it is nevertheless +/// possible to use this struct and add to the schema. +pub struct SimpleRustyStorage { + /// dynamic DB Schema. 
(new tables may be added) + pub schema: DbtSchema, + db: NeptuneLevelDb, +} + +impl StorageWriter for SimpleRustyStorage { + #[inline] + async fn persist(&mut self) { + let mut write_ops = WriteBatchAsync::new(); + + // note: we read all pending ops and perform mutations + // in a single atomic operation. + { + let mut pending_writes = self.schema.pending_writes.lock_guard_mut().await; + for op in pending_writes.write_ops.iter() { + match op.clone() { + WriteOperation::Write(key, value) => write_ops.op_write(key, value), + WriteOperation::Delete(key) => write_ops.op_delete(key), + } + } + pending_writes.write_ops.clear(); + pending_writes.persist_count += 1; + } + + self.db.batch_write(write_ops).await + } +} + +impl SimpleRustyStorage { + /// Create a new SimpleRustyStorage + #[inline] + pub fn new(db: NeptuneLevelDb) -> Self { + let schema = DbtSchema::new(SimpleRustyReader { db: db.clone() }, None, None); + Self { schema, db } + } + + /// Create a new SimpleRustyStorage and provide a + /// name and lock acquisition callback for tracing + pub fn new_with_callback( + db: NeptuneLevelDb, + storage_name: &str, + lock_callback_fn: LockCallbackFn, + ) -> Self { + let schema = DbtSchema::new( + SimpleRustyReader { db: db.clone() }, + Some(storage_name), + Some(lock_callback_fn), + ); + Self { schema, db } + } +} diff --git a/src/database/storage/storage_schema/traits.rs b/src/database/storage/storage_schema/traits.rs new file mode 100644 index 00000000..4e5c6e23 --- /dev/null +++ b/src/database/storage/storage_schema/traits.rs @@ -0,0 +1,34 @@ +//! Traits that define the StorageSchema interface +//! +//! It is recommended to wildcard import these with +//! `use crate::database::storage::storage_vec::traits::*` + +use super::{RustyKey, RustyValue}; +pub use leveldb::database::key::IntoLevelDBKey; + +/// Defines table interface for types used by [`super::DbtSchema`] +#[allow(async_fn_in_trait)] +#[async_trait::async_trait] +pub trait DbTable { + // Retrieve all unwritten operations and empty write-queue + // async fn pull_queue(&mut self) -> Vec; + /// Restore existing table if present, else create a new one + async fn restore_or_new(&mut self); +} + +/// Defines storage reader interface +#[allow(async_fn_in_trait)] +pub trait StorageReader { + /// Return multiple values from storage, in the same order as the input keys + async fn get_many(&self, keys: impl IntoIterator) -> Vec>; + + /// Return a single value from storage + async fn get(&self, key: RustyKey) -> Option; +} + +/// Defines storage writer interface +#[allow(async_fn_in_trait)] +pub trait StorageWriter { + /// Write data to storage + async fn persist(&mut self); +} diff --git a/src/database/storage/storage_vec/mod.rs b/src/database/storage/storage_vec/mod.rs new file mode 100644 index 00000000..5cdac2c4 --- /dev/null +++ b/src/database/storage/storage_vec/mod.rs @@ -0,0 +1,386 @@ +//! Provides a NeptuneLevelDb backed Vector API that is thread-safe, cached, and atomic + +// We have split storage_vec into individual files, but for compatibility +// we still keep everything in mod storage_vec. +// +// To accomplish that, we keep the sub modules private, and +// add `pub use sub_module::*`. + +#![allow(missing_docs)] +// mod iterators; +pub mod traits; + +pub type Index = u64; +// pub use iterators::*; + +// note: we keep ordinary_vec around because it is +// used in DocTest examples, as it does not require DB. 
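+//
+// a minimal usage sketch (in-memory only; `OrdinaryVec` comes from the
+// `ordinary_vec` sub-module declared below, and needs an async context):
+//
+//     let mut v: OrdinaryVec<u64> = Default::default();
+//     v.push(42).await;
+//     assert_eq!(42, v.get(0).await);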
+ +mod ordinary_vec; +mod ordinary_vec_private; +pub use ordinary_vec::*; + +#[cfg(test)] +mod tests { + + // use futures::pin_mut; + // use futures::stream::StreamExt; + use super::traits::*; + use super::*; + + use itertools::Itertools; + use rand::{Rng, RngCore}; + use std::collections::HashMap; + + /// Return a persisted vector and a regular in-memory vector with the same elements + async fn get_persisted_vec_with_length( + length: Index, + _name: &str, + ) -> (OrdinaryVec, Vec) { + let mut persisted_vec: OrdinaryVec = Default::default(); + let mut regular_vec = vec![]; + + let mut rng = rand::thread_rng(); + for _ in 0..length { + let value = rng.next_u64(); + persisted_vec.push(value).await; + regular_vec.push(value); + } + + // Sanity checks + assert_eq!(persisted_vec.len().await, regular_vec.len() as u64); + + (persisted_vec, regular_vec) + } + + async fn simple_prop>(mut delegated_db_vec: Storage) { + assert_eq!( + 0, + delegated_db_vec.len().await, + "Length must be zero at initialization" + ); + assert!( + delegated_db_vec.is_empty().await, + "Vector must be empty at initialization" + ); + + // push two values, check length. + delegated_db_vec.push([42; 13]).await; + delegated_db_vec.push([44; 13]).await; + assert_eq!(2, delegated_db_vec.len().await); + assert!(!delegated_db_vec.is_empty().await); + + // Check `get`, `set`, and `get_many` + assert_eq!([44; 13], delegated_db_vec.get(1).await); + assert_eq!([42; 13], delegated_db_vec.get(0).await); + assert_eq!( + vec![[42; 13], [44; 13]], + delegated_db_vec.get_many(&[0, 1]).await + ); + assert_eq!( + vec![[44; 13], [42; 13]], + delegated_db_vec.get_many(&[1, 0]).await + ); + assert_eq!(vec![[42; 13]], delegated_db_vec.get_many(&[0]).await); + assert_eq!(vec![[44; 13]], delegated_db_vec.get_many(&[1]).await); + assert_eq!( + Vec::<[u8; 13]>::default(), + delegated_db_vec.get_many(&[]).await + ); + + delegated_db_vec.set(0, [101; 13]).await; + delegated_db_vec.set(1, [200; 13]).await; + assert_eq!(vec![[101; 13]], delegated_db_vec.get_many(&[0]).await); + assert_eq!( + Vec::<[u8; 13]>::default(), + delegated_db_vec.get_many(&[]).await + ); + assert_eq!(vec![[200; 13]], delegated_db_vec.get_many(&[1]).await); + assert_eq!(vec![[200; 13]; 2], delegated_db_vec.get_many(&[1, 1]).await); + assert_eq!( + vec![[200; 13]; 3], + delegated_db_vec.get_many(&[1, 1, 1]).await + ); + assert_eq!( + vec![[200; 13], [101; 13], [200; 13]], + delegated_db_vec.get_many(&[1, 0, 1]).await + ); + + // test set_many, get_many. 
pass array to set_many + delegated_db_vec + .set_many([(0, [41; 13]), (1, [42; 13])]) + .await; + // get in reverse order + assert_eq!( + vec![[42; 13], [41; 13]], + delegated_db_vec.get_many(&[1, 0]).await + ); + + // set values back how they were before prior set_many() passing HashMap + delegated_db_vec + .set_many(HashMap::from([(0, [101; 13]), (1, [200; 13])])) + .await; + + // Pop two values, check length and return value of further pops + assert_eq!([200; 13], delegated_db_vec.pop().await.unwrap()); + assert_eq!(1, delegated_db_vec.len().await); + assert_eq!([101; 13], delegated_db_vec.pop().await.unwrap()); + assert!(delegated_db_vec.pop().await.is_none()); + assert_eq!(0, delegated_db_vec.len().await); + assert!(delegated_db_vec.pop().await.is_none()); + assert_eq!( + Vec::<[u8; 13]>::default(), + delegated_db_vec.get_many(&[]).await + ); + } + + #[tokio::test] + async fn test_simple_prop() { + let ordinary_vec: OrdinaryVec<[u8; 13]> = Default::default(); + simple_prop(ordinary_vec).await; + } + + #[tokio::test] + async fn multiple_vectors_in_one_db() { + let mut delegated_db_vec_a: OrdinaryVec = Default::default(); + let delegated_db_vec_b: OrdinaryVec = Default::default(); + + // push values to vec_a, verify vec_b is not affected + delegated_db_vec_a.push(1000).await; + delegated_db_vec_a.push(2000).await; + delegated_db_vec_a.push(3000).await; + + assert_eq!(3, delegated_db_vec_a.len().await); + assert_eq!(0, delegated_db_vec_b.len().await); + } + + #[tokio::test] + async fn test_set_many() { + let mut delegated_db_vec_a: OrdinaryVec = Default::default(); + + delegated_db_vec_a.push(10).await; + delegated_db_vec_a.push(20).await; + delegated_db_vec_a.push(30).await; + delegated_db_vec_a.push(40).await; + + // Allow `set_many` with empty input + delegated_db_vec_a.set_many([]).await; + assert_eq!( + vec![10, 20, 30], + delegated_db_vec_a.get_many(&[0, 1, 2]).await + ); + + // Perform an actual update with `set_many` + let updates = [(0, 100), (1, 200), (2, 300), (3, 400)]; + delegated_db_vec_a.set_many(updates).await; + + assert_eq!( + vec![100, 200, 300], + delegated_db_vec_a.get_many(&[0, 1, 2]).await + ); + + #[allow(clippy::shadow_unrelated)] + let updates = HashMap::from([(0, 1000), (1, 2000), (2, 3000)]); + delegated_db_vec_a.set_many(updates).await; + + assert_eq!( + vec![1000, 2000, 3000], + delegated_db_vec_a.get_many(&[0, 1, 2]).await + ); + } + + #[tokio::test] + async fn test_set_all() { + let mut delegated_db_vec_a: OrdinaryVec = Default::default(); + + delegated_db_vec_a.push(10).await; + delegated_db_vec_a.push(20).await; + delegated_db_vec_a.push(30).await; + + let updates = [100, 200, 300]; + delegated_db_vec_a.set_all(updates).await; + + assert_eq!( + vec![100, 200, 300], + delegated_db_vec_a.get_many(&[0, 1, 2]).await + ); + + #[allow(clippy::shadow_unrelated)] + let updates = vec![1000, 2000, 3000]; + delegated_db_vec_a.set_all(updates).await; + + assert_eq!( + vec![1000, 2000, 3000], + delegated_db_vec_a.get_many(&[0, 1, 2]).await + ); + } + + #[tokio::test] + async fn get_many_ordering_of_outputs() { + let mut delegated_db_vec_a: OrdinaryVec = Default::default(); + + delegated_db_vec_a.push(1000).await; + delegated_db_vec_a.push(2000).await; + delegated_db_vec_a.push(3000).await; + + // Test `get_many` ordering of outputs + assert_eq!( + vec![1000, 2000, 3000], + delegated_db_vec_a.get_many(&[0, 1, 2]).await + ); + assert_eq!( + vec![2000, 3000, 1000], + delegated_db_vec_a.get_many(&[1, 2, 0]).await + ); + assert_eq!( + vec![3000, 1000, 2000], + 
delegated_db_vec_a.get_many(&[2, 0, 1]).await + ); + assert_eq!( + vec![2000, 1000, 3000], + delegated_db_vec_a.get_many(&[1, 0, 2]).await + ); + assert_eq!( + vec![3000, 2000, 1000], + delegated_db_vec_a.get_many(&[2, 1, 0]).await + ); + assert_eq!( + vec![1000, 3000, 2000], + delegated_db_vec_a.get_many(&[0, 2, 1]).await + ); + } + + #[tokio::test] + async fn delegated_vec_pbt() { + let (mut persisted_vector, mut normal_vector) = + get_persisted_vec_with_length(10000, "vec 1").await; + + let mut rng = rand::thread_rng(); + for _ in 0..10000 { + match rng.gen_range(0..=4) { + 0 => { + // `push` + let push_val = rng.next_u64(); + persisted_vector.push(push_val).await; + normal_vector.push(push_val); + } + 1 => { + // `pop` + let persisted_pop_val = persisted_vector.pop().await.unwrap(); + let normal_pop_val = normal_vector.pop().unwrap(); + assert_eq!(persisted_pop_val, normal_pop_val); + } + 2 => { + // `get_many` + let index = rng.gen_range(0..normal_vector.len()); + assert_eq!(Vec::::default(), persisted_vector.get_many(&[]).await); + assert_eq!( + normal_vector[index], + persisted_vector.get(index as u64).await + ); + assert_eq!( + vec![normal_vector[index]], + persisted_vector.get_many(&[index as u64]).await + ); + assert_eq!( + vec![normal_vector[index], normal_vector[index]], + persisted_vector + .get_many(&[index as u64, index as u64]) + .await + ); + } + 3 => { + // `set` + let value = rng.next_u64(); + let index = rng.gen_range(0..normal_vector.len()); + normal_vector[index] = value; + persisted_vector.set(index as u64, value).await; + } + 4 => { + // `set_many` + let indices: Vec = (0..rng.gen_range(0..10)) + .map(|_| rng.gen_range(0..normal_vector.len() as u64)) + .unique() + .collect(); + let values: Vec = (0..indices.len()).map(|_| rng.next_u64()).collect_vec(); + let update: Vec<(u64, u64)> = + indices.into_iter().zip_eq(values.into_iter()).collect(); + for (key, val) in update.iter() { + normal_vector[*key as usize] = *val; + } + persisted_vector.set_many(update).await; + } + _ => unreachable!(), + } + } + + // Check equality after above loop + assert_eq!(normal_vector.len(), persisted_vector.len().await as usize); + for (i, nvi) in normal_vector.iter().enumerate() { + assert_eq!(*nvi, persisted_vector.get(i as u64).await); + } + + // Check equality using `get_many` + assert_eq!( + normal_vector, + persisted_vector + .get_many(&(0..normal_vector.len() as u64).collect_vec()) + .await + ); + } + + #[should_panic(expected = "Out-of-bounds. Got index 3 but length was 1.")] + #[tokio::test] + async fn panic_on_out_of_bounds_get() { + let (delegated_db_vec, _) = get_persisted_vec_with_length(1, "unit test vec 0").await; + delegated_db_vec.get(3).await; + } + + #[should_panic(expected = "Out-of-bounds. 
Got index 3 but length was 1.")] + #[tokio::test] + async fn panic_on_out_of_bounds_get_many() { + let (delegated_db_vec, _) = get_persisted_vec_with_length(1, "unit test vec 0").await; + delegated_db_vec.get_many(&[3]).await; + } + + #[should_panic(expected = "index out of bounds: the len is 1 but the index is 1")] + #[tokio::test] + async fn panic_on_out_of_bounds_set() { + let (mut delegated_db_vec, _) = get_persisted_vec_with_length(1, "unit test vec 0").await; + delegated_db_vec.set(1, 3000).await; + } + + #[should_panic(expected = "index out of bounds: the len is 1 but the index is 1")] + #[tokio::test] + async fn panic_on_out_of_bounds_set_many() { + let (mut delegated_db_vec, _) = get_persisted_vec_with_length(1, "unit test vec 0").await; + + // attempt to set 2 values, when only one is in vector. + delegated_db_vec.set_many([(0, 0), (1, 1)]).await; + } + + #[should_panic(expected = "size-mismatch. input has 2 elements and target has 1 elements.")] + #[tokio::test] + async fn panic_on_size_mismatch_set_all() { + let (mut delegated_db_vec, _) = get_persisted_vec_with_length(1, "unit test vec 0").await; + + // attempt to set 2 values, when only one is in vector. + delegated_db_vec.set_all([1, 2]).await; + } + + #[should_panic(expected = "Out-of-bounds. Got index 11 but length was 11.")] + #[tokio::test] + async fn panic_on_out_of_bounds_get_even_though_value_exists_in_persistent_memory() { + let (mut delegated_db_vec, _) = get_persisted_vec_with_length(12, "unit test vec 0").await; + delegated_db_vec.pop().await; + delegated_db_vec.get(11).await; + } + + #[should_panic(expected = "index out of bounds: the len is 11 but the index is 11")] + #[tokio::test] + async fn panic_on_out_of_bounds_set_even_though_value_exists_in_persistent_memory() { + let (mut delegated_db_vec, _) = get_persisted_vec_with_length(12, "unit test vec 0").await; + delegated_db_vec.pop().await; + delegated_db_vec.set(11, 5000).await; + } +} diff --git a/src/database/storage/storage_vec/ordinary_vec.rs b/src/database/storage/storage_vec/ordinary_vec.rs new file mode 100644 index 00000000..0fa187fb --- /dev/null +++ b/src/database/storage/storage_vec/ordinary_vec.rs @@ -0,0 +1,147 @@ +use serde::de::DeserializeOwned; +use serde::Serialize; + +use super::ordinary_vec_private::OrdinaryVecPrivate; +use super::{traits::*, Index}; + +/// Implements [`StorageVec`]` trait for an ordinary (in memory) `Vec` +#[derive(Debug, Clone, Default)] +pub struct OrdinaryVec(OrdinaryVecPrivate); + +impl From> for OrdinaryVec { + fn from(v: Vec) -> Self { + Self(OrdinaryVecPrivate(v)) + } +} + +#[async_trait::async_trait] +impl StorageVecBase + for OrdinaryVec +{ + #[inline] + async fn is_empty(&self) -> bool { + self.0.is_empty() + } + + #[inline] + async fn len(&self) -> Index { + self.0.len() + } + + #[inline] + async fn get(&self, index: Index) -> T { + self.0.get(index) + } + + #[inline] + async fn get_many(&self, indices: &[Index]) -> Vec { + self.0.get_many(indices) + } + + /// Return all stored elements in a vector, whose index matches the StorageVec's. + /// It's the caller's responsibility that there is enough memory to store all elements. 
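+    ///
+    /// (note: this in-memory implementation simply clones the backing `Vec`.)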
+ #[inline] + async fn get_all(&self) -> Vec { + self.0.get_all() + } + + // async fn many_iter<'a>( + // &'a self, + // indices: impl IntoIterator + 'a, + // ) -> Box + '_> { + // // note: this lock is moved into the iterator closure and is not + // // released until caller drops the returned iterator + // let inner = self.read_lock(); + + // Box::new(indices.into_iter().map(move |i| { + // assert!( + // i < inner.len(), + // "Out-of-bounds. Got index {} but length was {}.", + // i, + // inner.len(), + // ); + // (i, inner.get(i)) + // })) + // } + + // async fn many_iter_values<'a>( + // &'a self, + // indices: impl IntoIterator + 'a, + // ) -> Box + '_> { + // // note: this lock is moved into the iterator closure and is not + // // released until caller drops the returned iterator + // let inner = self.read_lock(); + + // Box::new(indices.into_iter().map(move |i| { + // assert!( + // i < inner.len(), + // "Out-of-bounds. Got index {} but length was {}.", + // i, + // inner.len(), + // ); + // inner.get(i) + // })) + // } + + #[inline] + async fn set(&mut self, index: Index, value: T) { + // note: on 32 bit systems, this could panic. + self.0.set(index, value); + } + + #[inline] + async fn set_many(&mut self, key_vals: impl IntoIterator + Send) { + self.0.set_many(key_vals) + } + + #[inline] + async fn pop(&mut self) -> Option { + self.0.pop() + } + + #[inline] + async fn push(&mut self, value: T) { + self.0.push(value); + } + + #[inline] + async fn clear(&mut self) { + self.0.clear(); + } +} + +// Async Streams (ie async iterators) +impl StorageVecStream + for OrdinaryVec +{ +} + +impl StorageVec + for OrdinaryVec +{ +} + +#[cfg(test)] +mod tests { + use super::super::traits::tests as trait_tests; + use super::*; + + pub fn mk_test_vec_u64() -> OrdinaryVec { + OrdinaryVec::from(vec![]) + } + + pub mod streams { + use super::*; + use trait_tests::streams as stream_tests; + + #[tokio::test] + pub async fn stream() { + stream_tests::stream(mk_test_vec_u64()).await + } + + #[tokio::test] + pub async fn stream_many() { + stream_tests::stream_many(mk_test_vec_u64()).await + } + } +} diff --git a/src/database/storage/storage_vec/ordinary_vec_private.rs b/src/database/storage/storage_vec/ordinary_vec_private.rs new file mode 100644 index 00000000..77913328 --- /dev/null +++ b/src/database/storage/storage_vec/ordinary_vec_private.rs @@ -0,0 +1,67 @@ +use serde::{de::DeserializeOwned, Serialize}; + +use super::Index; + +#[derive(Debug, Clone, Default)] +pub(crate) struct OrdinaryVecPrivate(pub(super) Vec); + +impl OrdinaryVecPrivate { + #[inline] + pub(super) fn get(&self, index: Index) -> T { + self.0 + .get(index as usize) + .unwrap_or_else(|| { + panic!( + "Out-of-bounds. 
Got index {} but length was {}.", + index, + self.0.len(), + ) + }) + .clone() + } + + pub(super) fn get_many(&self, indices: &[Index]) -> Vec { + indices.iter().map(|i| self.get(*i)).collect() + } + + pub(super) fn get_all(&self) -> Vec { + self.0.clone() + } + + #[inline] + pub(super) fn set(&mut self, index: Index, value: T) { + self.0[index as usize] = value; + } + + #[inline] + pub(super) fn is_empty(&self) -> bool { + self.0.is_empty() + } + + #[inline] + pub(super) fn len(&self) -> Index { + self.0.len() as Index + } + + #[inline] + pub(super) fn set_many(&mut self, key_vals: impl IntoIterator) { + for (key, val) in key_vals.into_iter() { + self.set(key, val); + } + } + + #[inline] + pub(super) fn pop(&mut self) -> Option { + self.0.pop() + } + + #[inline] + pub(super) fn push(&mut self, value: T) { + self.0.push(value); + } + + #[inline] + pub(super) fn clear(&mut self) { + self.0.clear(); + } +} diff --git a/src/database/storage/storage_vec/traits.rs b/src/database/storage/storage_vec/traits.rs new file mode 100644 index 00000000..da306843 --- /dev/null +++ b/src/database/storage/storage_vec/traits.rs @@ -0,0 +1,301 @@ +//! Traits that define the StorageVec interface +//! +//! It is recommended to wildcard import these with +//! `use crate::database::storage::storage_vec::traits::*` + +use super::Index; + +// for Stream (async Iterator equiv) +use async_stream::stream; +use futures::stream::Stream; + +// re-export to make life easier for users of our API. +pub use futures::{pin_mut, StreamExt}; + +// #[allow(async_fn_in_trait)] +#[async_trait::async_trait] +pub trait StorageVecBase { + /// check if collection is empty + async fn is_empty(&self) -> bool; + + /// get collection length + async fn len(&self) -> Index; + // fn len(&self) -> impl Future + Send; + + /// get single element at index + async fn get(&self, index: Index) -> T; + + /// get multiple elements matching indices + /// + /// This is a convenience method. For large collections + /// it may be more efficient to use a Stream or for-loop + /// and avoid allocating a Vec + async fn get_many(&self, indices: &[Index]) -> Vec; + // #[inline] + // async fn get_many(&self, indices: &[Index]) -> Vec { + // self.many_iter(indices.to_vec()).map(|(_i, v)| v).collect() + // } + + /// get all elements + /// + /// This is a convenience method. For large collections + /// it may be more efficient to use an iterator or for-loop + /// and avoid allocating a Vec + #[inline] + async fn get_all(&self) -> Vec { + let all_indices = (0..self.len().await).collect::>(); + self.get_many(&all_indices).await + } + + /// set a single element. + /// + /// note: The update is performed as a single atomic operation. + async fn set(&mut self, index: Index, value: T); + + /// set multiple elements. + /// + /// It is the caller's responsibility to ensure that index values are + /// unique. If not, the last value with the same index will win. + /// For unordered collections such as HashMap, the behavior is undefined. + /// + /// note: all updates are performed as a single atomic operation. + /// readers will see either the before or after state, + /// never an intermediate state. + async fn set_many(&mut self, key_vals: impl IntoIterator + Send); + + /// set elements from start to vals.count() + /// + /// note: all updates are performed as a single atomic operation. + /// readers will see either the before or after state, + /// never an intermediate state. 
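+    ///
+    /// e.g. (sketch): `vec.set_first_n([10, 20]).await` writes 10 to index 0
+    /// and 20 to index 1, leaving any later elements unchanged.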
+ #[inline] + async fn set_first_n(&mut self, vals: impl IntoIterator + Send) { + self.set_many((0..).zip(vals).collect::>()).await; + } + + /// set all elements with a simple list of values in an array or Vec + /// and validates that input length matches target length. + /// + /// panics if input length does not match target length. + /// + /// note: all updates are performed as a single atomic operation. + /// readers will see either the before or after state, + /// never an intermediate state. + /// + /// note: casts the input value's length from usize to Index + /// so will panic if vals contains more than 2^32 items + #[inline] + async fn set_all( + &mut self, + vals: impl IntoIterator + Send> + Send, + ) { + let iter = vals.into_iter(); + + assert!( + iter.len() as Index == self.len().await, + "size-mismatch. input has {} elements and target has {} elements.", + iter.len(), + self.len().await, + ); + + self.set_first_n(iter).await; + } + + /// pop an element from end of collection + /// + /// note: The update is performed as a single atomic operation. + async fn pop(&mut self) -> Option; + + /// push an element to end of collection + /// + /// note: The update is performed as a single atomic operation. + async fn push(&mut self, value: T); + + /// Removes all elements from the collection + /// + /// note: The update is performed as a single atomic operation. + async fn clear(&mut self); +} + +#[allow(async_fn_in_trait)] +pub trait StorageVecStream: StorageVecBase { + /// get an async Stream for iterating over all elements by key/val + /// + /// # Example: + /// ``` + /// # tokio_test::block_on(async { + /// # use neptune_core::database::storage::storage_vec::{OrdinaryVec, traits::*}; + /// # let mut vec = OrdinaryVec::::from(vec![1,2,3,4,5,6,7,8,9]); + /// + /// let stream = vec.stream().await; + /// pin_mut!(stream); // needed for iteration + /// + /// while let Some((key, val)) = stream.next().await { + /// println!("{key}: {val}") + /// } + /// # }) + /// ``` + #[inline] + async fn stream<'a>(&'a self) -> impl Stream + 'a + where + T: 'a, + { + self.stream_many(0..self.len().await).await + } + + /// get an async Stream for iterating over all elements by value + /// + /// # Example: + /// ``` + /// # tokio_test::block_on(async { + /// # use neptune_core::database::storage::storage_vec::{OrdinaryVec, traits::*}; + /// # let mut vec = OrdinaryVec::::from(vec![1,2,3,4,5,6,7,8,9]); + /// + /// let stream = vec.stream_values().await; + /// pin_mut!(stream); // needed for iteration + /// + /// while let Some(val) = stream.next().await { + /// println!("{val}") + /// } + /// # }) + /// ``` + #[inline] + async fn stream_values<'a>(&'a self) -> impl Stream + 'a + where + T: 'a, + { + self.stream_many_values(0..self.len().await).await + } + + /// get an async Stream for iterating over elements matching indices by key/value + /// + /// # Example: + /// ``` + /// # tokio_test::block_on(async { + /// # use neptune_core::database::storage::storage_vec::{OrdinaryVec, traits::*}; + /// # let mut vec = OrdinaryVec::::from(vec![1,2,3,4,5,6,7,8,9]); + /// + /// let stream = vec.stream_many([2,3,7]).await; + /// pin_mut!(stream); // needed for iteration + /// + /// while let Some((key, val)) = stream.next().await { + /// println!("{key}: {val}") + /// } + /// # }) + /// ``` + async fn stream_many<'a>( + &'a self, + indices: impl IntoIterator + 'a, + ) -> impl Stream + 'a + where + T: 'a, + { + stream! 
{ + for i in indices.into_iter() { + yield (i, self.get(i).await) + } + } + } + + /// get an async Stream for iterating over elements matching indices by value + /// + /// # Example: + /// ``` + /// # tokio_test::block_on(async { + /// # use neptune_core::database::storage::storage_vec::{OrdinaryVec, traits::*}; + /// # let mut vec = OrdinaryVec::::from(vec![1,2,3,4,5,6,7,8,9]); + /// + /// let stream = vec.stream_many_values([2,3,7]).await; + /// pin_mut!(stream); // needed for iteration + /// + /// while let Some(val) = stream.next().await { + /// println!("{val}") + /// } + /// # }) + /// ``` + async fn stream_many_values<'a>( + &'a self, + indices: impl IntoIterator + 'a, + ) -> impl Stream + 'a + where + T: 'a, + { + stream! { + for i in indices.into_iter() { + yield self.get(i).await + } + } + } +} + +pub trait StorageVec: StorageVecBase + StorageVecStream {} + +pub(in super::super) trait StorageVecIterMut: StorageVec {} + +#[cfg(test)] +pub(in super::super) mod tests { + use super::*; + // use itertools::Itertools; + + pub mod streams { + use super::*; + use futures::{pin_mut, StreamExt}; + + pub async fn prepare_streams_test_vec(vec: &mut impl StorageVecBase) { + vec.clear().await; + for i in 0..4 { + vec.push(i * 10).await; + } + } + + pub async fn stream(mut vec: impl StorageVecStream) { + prepare_streams_test_vec(&mut vec).await; + + { + let mut vals = vec![]; + let stream = vec.stream().await; + pin_mut!(stream); // needed for iteration + while let Some(value) = stream.next().await { + vals.push(value); + } + assert_eq!(vals, vec![(0, 0), (1, 10), (2, 20), (3, 30)]); + } + + vec.clear().await; + + { + let mut vals = vec![]; + let stream = vec.stream().await; + pin_mut!(stream); // needed for iteration + while let Some(value) = stream.next().await { + vals.push(value); + } + assert_eq!(vals, vec![]); + } + } + + pub async fn stream_many(mut vec: impl StorageVecStream) { + prepare_streams_test_vec(&mut vec).await; + + { + let mut vals = vec![]; + let stream = vec.stream_many([0, 1, 2, 3]).await; + pin_mut!(stream); // needed for iteration + while let Some(value) = stream.next().await { + vals.push(value); + } + assert_eq!(vals, vec![(0, 0), (1, 10), (2, 20), (3, 30)]); + } + + { + let mut vals = vec![]; + let stream = vec.stream_many([1, 2]).await; + pin_mut!(stream); // needed for iteration + while let Some(value) = stream.next().await { + vals.push(value); + } + assert_eq!(vals, vec![(1, 10), (2, 20)]); + } + } + } +} diff --git a/src/lib.rs b/src/lib.rs index 1248cedd..0439e906 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,6 +7,8 @@ pub mod config_models; pub mod connect_to_peers; pub mod database; +pub mod locks; +pub mod macros; pub mod main_loop; pub mod mine_loop; pub mod models; @@ -39,12 +41,13 @@ use crate::rpc_server::RPC; use anyhow::{Context, Result}; use config_models::cli_args; -use crate::twenty_first::sync::{LockCallbackFn, LockEvent}; -use crate::util_types::sync::tokio as sync_tokio; +use crate::locks::tokio as sync_tokio; +use crate::locks::tokio::{LockCallbackFn, LockEvent}; use chrono::{DateTime, Local, NaiveDateTime, Utc}; use futures::future; use futures::Future; use futures::StreamExt; + use models::blockchain::block::Block; use models::blockchain::shared::Hash; use models::peer::PeerInfo; @@ -242,6 +245,7 @@ pub async fn initialize(cli_args: cli_args::Args) -> Result<()> { state: rpc_state_lock.clone(), rpc_server_to_main_tx: rpc_server_to_main_tx.clone(), }; + channel.execute(server.serve()).for_each(spawn) }) // Max 10 channels. 
diff --git a/src/locks/mod.rs b/src/locks/mod.rs new file mode 100644 index 00000000..e5818307 --- /dev/null +++ b/src/locks/mod.rs @@ -0,0 +1,4 @@ +//! Provides simplified lock types for sharing data between threads + +pub mod std; +pub mod tokio; diff --git a/src/locks/std/atomic_mutex.rs b/src/locks/std/atomic_mutex.rs new file mode 100644 index 00000000..af09c754 --- /dev/null +++ b/src/locks/std/atomic_mutex.rs @@ -0,0 +1,432 @@ +use super::traits::Atomic; +use super::{LockAcquisition, LockCallbackFn, LockCallbackInfo, LockEvent, LockType}; +use std::ops::{Deref, DerefMut}; +use std::sync::{Arc, Mutex, MutexGuard}; + +/// An `Arc>` wrapper to make data thread-safe and easy to work with. +/// +/// # Example +/// ``` +/// # use neptune_core::locks::std::{AtomicMutex, traits::*}; +/// struct Car { +/// year: u16, +/// }; +/// let mut atomic_car = AtomicMutex::from(Car{year: 2016}); +/// atomic_car.lock(|c| println!("year: {}", c.year)); +/// atomic_car.lock_mut(|mut c| c.year = 2023); +/// ``` +/// +/// It is also possible to provide a name and callback fn +/// during instantiation. In this way, the application +/// can easily trace lock acquisitions. +/// +/// # Examples +/// ``` +/// # use neptune_core::locks::std::{AtomicMutex, LockEvent, LockCallbackFn}; +/// struct Car { +/// year: u16, +/// }; +/// +/// pub fn log_lock_event(lock_event: LockEvent) { +/// let (event, info, acquisition) = +/// match lock_event { +/// LockEvent::TryAcquire{info, acquisition} => ("TryAcquire", info, acquisition), +/// LockEvent::Acquire{info, acquisition} => ("Acquire", info, acquisition), +/// LockEvent::Release{info, acquisition} => ("Release", info, acquisition), +/// }; +/// +/// println!( +/// "{} lock `{}` of type `{}` for `{}` by\n\t|-- thread {}, `{:?}`", +/// event, +/// info.name().unwrap_or("?"), +/// info.lock_type(), +/// acquisition, +/// std::thread::current().name().unwrap_or("?"), +/// std::thread::current().id(), +/// ); +/// } +/// const LOG_LOCK_EVENT_CB: LockCallbackFn = log_lock_event; +/// +/// let mut atomic_car = AtomicMutex::::from((Car{year: 2016}, Some("car"), Some(LOG_LOCK_EVENT_CB))); +/// atomic_car.lock(|c| {println!("year: {}", c.year)}); +/// atomic_car.lock_mut(|mut c| {c.year = 2023}); +/// ``` +/// +/// results in: +/// ```text +/// TryAcquire lock `car` of type `Mutex` for `Read` by +/// |-- thread main, `ThreadId(1)` +/// Acquire lock `car` of type `Mutex` for `Read` by +/// |-- thread main, `ThreadId(1)` +/// year: 2016 +/// Release lock `car` of type `Mutex` for `Read` by +/// |-- thread main, `ThreadId(1)` +/// TryAcquire lock `car` of type `Mutex` for `Write` by +/// |-- thread main, `ThreadId(1)` +/// Acquire lock `car` of type `Mutex` for `Write` by +/// |-- thread main, `ThreadId(1)` +/// Release lock `car` of type `Mutex` for `Write` by +/// |-- thread main, `ThreadId(1)` +/// ``` +#[derive(Debug)] +pub struct AtomicMutex { + inner: Arc>, + lock_callback_info: LockCallbackInfo, +} + +impl Default for AtomicMutex { + fn default() -> Self { + Self { + inner: Default::default(), + lock_callback_info: LockCallbackInfo::new(LockType::Mutex, None, None), + } + } +} + +impl From for AtomicMutex { + #[inline] + fn from(t: T) -> Self { + Self { + inner: Arc::new(Mutex::new(t)), + lock_callback_info: LockCallbackInfo::new(LockType::Mutex, None, None), + } + } +} +impl From<(T, Option, Option)> for AtomicMutex { + /// Create from an optional name and an optional callback function, which + /// is called when a lock event occurs. 
+ #[inline] + fn from(v: (T, Option, Option)) -> Self { + Self { + inner: Arc::new(Mutex::new(v.0)), + lock_callback_info: LockCallbackInfo::new(LockType::Mutex, v.1, v.2), + } + } +} +impl From<(T, Option<&str>, Option)> for AtomicMutex { + /// Create from a name ref and an optional callback function, which + /// is called when a lock event occurs. + #[inline] + fn from(v: (T, Option<&str>, Option)) -> Self { + Self { + inner: Arc::new(Mutex::new(v.0)), + lock_callback_info: LockCallbackInfo::new( + LockType::Mutex, + v.1.map(|s| s.to_owned()), + v.2, + ), + } + } +} + +impl Clone for AtomicMutex { + fn clone(&self) -> Self { + Self { + lock_callback_info: self.lock_callback_info.clone(), + inner: self.inner.clone(), + } + } +} + +impl From> for AtomicMutex { + #[inline] + fn from(t: Mutex) -> Self { + Self { + inner: Arc::new(t), + lock_callback_info: LockCallbackInfo::new(LockType::Mutex, None, None), + } + } +} +impl From<(Mutex, Option, Option)> for AtomicMutex { + /// Create from an `Mutex` plus an optional name + /// and an optional callback function, which is called + /// when a lock event occurs. + #[inline] + fn from(v: (Mutex, Option, Option)) -> Self { + Self { + inner: Arc::new(v.0), + lock_callback_info: LockCallbackInfo::new(LockType::Mutex, v.1, v.2), + } + } +} + +impl TryFrom> for Mutex { + type Error = Arc>; + fn try_from(t: AtomicMutex) -> Result, Self::Error> { + Arc::>::try_unwrap(t.inner) + } +} + +impl From>> for AtomicMutex { + #[inline] + fn from(t: Arc>) -> Self { + Self { + inner: t, + lock_callback_info: LockCallbackInfo::new(LockType::Mutex, None, None), + } + } +} +impl From<(Arc>, Option, Option)> for AtomicMutex { + /// Create from an `Arc>` plus an optional name and + /// an optional callback function, which is called when a lock + /// event occurs. + #[inline] + fn from(v: (Arc>, Option, Option)) -> Self { + Self { + inner: v.0, + lock_callback_info: LockCallbackInfo::new(LockType::Mutex, v.1, v.2), + } + } +} + +impl From> for Arc> { + #[inline] + fn from(t: AtomicMutex) -> Self { + t.inner + } +} + +// note: we impl the Atomic trait methods here also so they +// can be used without caller having to use the trait. 
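+//
+// e.g. both call forms below work; `atomic` is any `AtomicMutex<T>` (sketch):
+//
+//     atomic.lock(|v| { /* read-only access to v */ });
+//     Atomic::lock(&atomic, |v| { /* same, via the `Atomic` trait */ });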
+impl AtomicMutex { + pub const fn const_new( + t: Arc>, + name: Option, + lock_callback_fn: Option, + ) -> Self { + Self { + inner: t, + lock_callback_info: LockCallbackInfo { + lock_info_owned: super::shared::LockInfoOwned { + name, + lock_type: LockType::Mutex, + }, + lock_callback_fn, + }, + } + } + + /// Acquire read lock and return an `AtomicMutexGuard` + /// + /// # Examples + /// ``` + /// # use neptune_core::locks::std::{AtomicMutex, traits::*}; + /// struct Car { + /// year: u16, + /// }; + /// let atomic_car = AtomicMutex::from(Car{year: 2016}); + /// let year = atomic_car.lock_guard().year; + /// ``` + pub fn lock_guard(&self) -> AtomicMutexGuard { + self.try_acquire_read_cb(); + let guard = self.inner.lock().expect("Read lock should succeed"); + AtomicMutexGuard::new(guard, &self.lock_callback_info, LockAcquisition::Read) + } + + /// Acquire write lock and return an `AtomicMutexGuard` + /// + /// # Examples + /// ``` + /// # use neptune_core::locks::std::{AtomicMutex, traits::*}; + /// struct Car { + /// year: u16, + /// }; + /// let mut atomic_car = AtomicMutex::from(Car{year: 2016}); + /// atomic_car.lock_guard_mut().year = 2022; + /// ``` + pub fn lock_guard_mut(&mut self) -> AtomicMutexGuard { + self.try_acquire_write_cb(); + let guard = self.inner.lock().expect("Write lock should succeed"); + AtomicMutexGuard::new(guard, &self.lock_callback_info, LockAcquisition::Write) + } + + /// Immutably access the data of type `T` in a closure and possibly return a result of type `R` + /// + /// # Examples + /// ``` + /// # use neptune_core::locks::std::{AtomicMutex, traits::*}; + /// struct Car { + /// year: u16, + /// }; + /// let atomic_car = AtomicMutex::from(Car{year: 2016}); + /// atomic_car.lock(|c| println!("year: {}", c.year)); + /// let year = atomic_car.lock(|c| c.year); + /// ``` + pub fn lock(&self, f: F) -> R + where + F: FnOnce(&T) -> R, + { + self.try_acquire_read_cb(); + let guard = self.inner.lock().expect("Read lock should succeed"); + let my_guard = + AtomicMutexGuard::new(guard, &self.lock_callback_info, LockAcquisition::Read); + f(&my_guard) + } + + /// Mutably access the data of type `T` in a closure and possibly return a result of type `R` + /// + /// # Examples + /// ``` + /// # use neptune_core::locks::std::{AtomicMutex, traits::*}; + /// struct Car { + /// year: u16, + /// }; + /// let mut atomic_car = AtomicMutex::from(Car{year: 2016}); + /// atomic_car.lock_mut(|mut c| {c.year = 2022}); + /// let year = atomic_car.lock_mut(|mut c| {c.year = 2023; c.year}); + /// ``` + pub fn lock_mut(&mut self, f: F) -> R + where + F: FnOnce(&mut T) -> R, + { + self.try_acquire_write_cb(); + let guard = self.inner.lock().expect("Write lock should succeed"); + let mut my_guard = + AtomicMutexGuard::new(guard, &self.lock_callback_info, LockAcquisition::Write); + f(&mut my_guard) + } + + /// get copy of the locked value T (if T implements Copy). + /// + /// # Example + /// ``` + /// # use neptune_core::locks::std::{AtomicMutex, traits::*}; + /// let atomic_u64 = AtomicMutex::from(25u64); + /// let age = atomic_u64.get(); + /// ``` + #[inline] + pub fn get(&self) -> T + where + T: Copy, + { + self.lock(|v| *v) + } + + /// set the locked value T (if T implements Copy). 
+ /// + /// # Example + /// ``` + /// # use neptune_core::locks::std::{AtomicMutex, traits::*}; + /// let mut atomic_bool = AtomicMutex::from(false); + /// atomic_bool.set(true); + /// ``` + #[inline] + pub fn set(&mut self, value: T) + where + T: Copy, + { + self.lock_mut(|v| *v = value) + } + + /// retrieve lock name if present, or None + #[inline] + pub fn name(&self) -> Option<&str> { + self.lock_callback_info.lock_info_owned.name.as_deref() + } + + fn try_acquire_read_cb(&self) { + if let Some(cb) = self.lock_callback_info.lock_callback_fn { + cb(LockEvent::TryAcquire { + info: self.lock_callback_info.lock_info_owned.as_lock_info(), + acquisition: LockAcquisition::Read, + }); + } + } + + fn try_acquire_write_cb(&self) { + if let Some(cb) = self.lock_callback_info.lock_callback_fn { + cb(LockEvent::TryAcquire { + info: self.lock_callback_info.lock_info_owned.as_lock_info(), + acquisition: LockAcquisition::Write, + }); + } + } +} + +impl Atomic for AtomicMutex { + #[inline] + fn lock(&self, f: F) -> R + where + F: FnOnce(&T) -> R, + { + AtomicMutex::::lock(self, f) + } + + #[inline] + fn lock_mut(&mut self, f: F) -> R + where + F: FnOnce(&mut T) -> R, + { + AtomicMutex::::lock_mut(self, f) + } +} + +/// A wrapper for [MutexGuard](std::sync::MutexGuard) that +/// can optionally call a callback to notify when the +/// lock event occurs +pub struct AtomicMutexGuard<'a, T> { + guard: MutexGuard<'a, T>, + lock_callback_info: &'a LockCallbackInfo, + acquisition: LockAcquisition, +} + +impl<'a, T> AtomicMutexGuard<'a, T> { + fn new( + guard: MutexGuard<'a, T>, + lock_callback_info: &'a LockCallbackInfo, + acquisition: LockAcquisition, + ) -> Self { + if let Some(cb) = lock_callback_info.lock_callback_fn { + cb(LockEvent::Acquire { + info: lock_callback_info.lock_info_owned.as_lock_info(), + acquisition, + }); + } + Self { + guard, + lock_callback_info, + acquisition, + } + } +} + +impl<'a, T> Drop for AtomicMutexGuard<'a, T> { + fn drop(&mut self) { + let lock_callback_info = self.lock_callback_info; + if let Some(cb) = lock_callback_info.lock_callback_fn { + cb(LockEvent::Release { + info: lock_callback_info.lock_info_owned.as_lock_info(), + acquisition: self.acquisition, + }); + } + } +} + +impl<'a, T> Deref for AtomicMutexGuard<'a, T> { + type Target = T; + fn deref(&self) -> &Self::Target { + &self.guard + } +} + +impl<'a, T> DerefMut for AtomicMutexGuard<'a, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.guard + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + // Verify (compile-time) that AtomicMutex::lock() and ::lock_mut() accept mutable values. (FnMut) + fn mutable_assignment() { + let name = "Jim".to_string(); + let mut atomic_name = AtomicMutex::from(name); + + let mut new_name: String = Default::default(); + atomic_name.lock(|n| new_name = n.to_string()); + atomic_name.lock_mut(|n| new_name = n.to_string()); + } +} diff --git a/src/locks/std/atomic_rw.rs b/src/locks/std/atomic_rw.rs new file mode 100644 index 00000000..27e0b704 --- /dev/null +++ b/src/locks/std/atomic_rw.rs @@ -0,0 +1,450 @@ +use super::shared::LockAcquisition; +use super::traits::Atomic; +use super::{LockCallbackFn, LockCallbackInfo, LockEvent, LockType}; +use std::ops::{Deref, DerefMut}; +use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}; + +/// An `Arc>` wrapper to make data thread-safe and easy to work with. 
+///
+/// # Example
+/// ```
+/// # use neptune_core::locks::std::{AtomicRw, traits::*};
+/// struct Car {
+///     year: u16,
+/// };
+/// let mut atomic_car = AtomicRw::from(Car{year: 2016});
+/// atomic_car.lock(|c| println!("year: {}", c.year));
+/// atomic_car.lock_mut(|mut c| c.year = 2023);
+/// ```
+///
+/// It is also possible to provide a name and callback fn
+/// during instantiation. In this way, the application
+/// can easily trace lock acquisitions.
+///
+/// # Examples
+/// ```
+/// # use neptune_core::locks::std::{AtomicRw, LockEvent, LockCallbackFn};
+/// struct Car {
+///     year: u16,
+/// };
+///
+/// pub fn log_lock_event(lock_event: LockEvent) {
+///     let (event, info, acquisition) =
+///         match lock_event {
+///             LockEvent::TryAcquire{info, acquisition} => ("TryAcquire", info, acquisition),
+///             LockEvent::Acquire{info, acquisition} => ("Acquire", info, acquisition),
+///             LockEvent::Release{info, acquisition} => ("Release", info, acquisition),
+///         };
+///
+///     println!(
+///         "{} lock `{}` of type `{}` for `{}` by\n\t|-- thread {}, `{:?}`",
+///         event,
+///         info.name().unwrap_or("?"),
+///         info.lock_type(),
+///         acquisition,
+///         std::thread::current().name().unwrap_or("?"),
+///         std::thread::current().id(),
+///     );
+/// }
+/// const LOG_LOCK_EVENT_CB: LockCallbackFn = log_lock_event;
+///
+/// let mut atomic_car = AtomicRw::<Car>::from((Car{year: 2016}, Some("car"), Some(LOG_LOCK_EVENT_CB)));
+/// atomic_car.lock(|c| {println!("year: {}", c.year)});
+/// atomic_car.lock_mut(|mut c| {c.year = 2023});
+/// ```
+///
+/// results in:
+/// ```text
+/// TryAcquire lock `car` of type `RwLock` for `Read` by
+///     |-- thread main, `ThreadId(1)`
+/// Acquire lock `car` of type `RwLock` for `Read` by
+///     |-- thread main, `ThreadId(1)`
+/// year: 2016
+/// Release lock `car` of type `RwLock` for `Read` by
+///     |-- thread main, `ThreadId(1)`
+/// TryAcquire lock `car` of type `RwLock` for `Write` by
+///     |-- thread main, `ThreadId(1)`
+/// Acquire lock `car` of type `RwLock` for `Write` by
+///     |-- thread main, `ThreadId(1)`
+/// Release lock `car` of type `RwLock` for `Write` by
+///     |-- thread main, `ThreadId(1)`
+/// ```
+#[derive(Debug)]
+pub struct AtomicRw<T> {
+    inner: Arc<RwLock<T>>,
+    lock_callback_info: LockCallbackInfo,
+}
+
+impl<T: Default> Default for AtomicRw<T> {
+    fn default() -> Self {
+        Self {
+            inner: Default::default(),
+            lock_callback_info: LockCallbackInfo::new(LockType::RwLock, None, None),
+        }
+    }
+}
+
+impl<T> From<T> for AtomicRw<T> {
+    #[inline]
+    fn from(t: T) -> Self {
+        Self {
+            inner: Arc::new(RwLock::new(t)),
+            lock_callback_info: LockCallbackInfo::new(LockType::RwLock, None, None),
+        }
+    }
+}
+impl<T> From<(T, Option<String>, Option<LockCallbackFn>)> for AtomicRw<T> {
+    /// Create from an optional name and an optional callback function, which
+    /// is called when a lock event occurs.
+    #[inline]
+    fn from(v: (T, Option<String>, Option<LockCallbackFn>)) -> Self {
+        Self {
+            inner: Arc::new(RwLock::new(v.0)),
+            lock_callback_info: LockCallbackInfo::new(LockType::RwLock, v.1, v.2),
+        }
+    }
+}
+impl<T> From<(T, Option<&str>, Option<LockCallbackFn>)> for AtomicRw<T> {
+    /// Create from a name ref and an optional callback function, which
+    /// is called when a lock event occurs.
+ #[inline] + fn from(v: (T, Option<&str>, Option)) -> Self { + Self { + inner: Arc::new(RwLock::new(v.0)), + lock_callback_info: LockCallbackInfo::new( + LockType::RwLock, + v.1.map(|s| s.to_owned()), + v.2, + ), + } + } +} + +impl Clone for AtomicRw { + fn clone(&self) -> Self { + Self { + lock_callback_info: self.lock_callback_info.clone(), + inner: self.inner.clone(), + } + } +} + +impl From> for AtomicRw { + #[inline] + fn from(t: RwLock) -> Self { + Self { + inner: Arc::new(t), + lock_callback_info: LockCallbackInfo::new(LockType::RwLock, None, None), + } + } +} +impl From<(RwLock, Option, Option)> for AtomicRw { + /// Create from an `RwLock` plus an optional name + /// and an optional callback function, which is called + /// when a lock event occurs. + #[inline] + fn from(v: (RwLock, Option, Option)) -> Self { + Self { + inner: Arc::new(v.0), + lock_callback_info: LockCallbackInfo::new(LockType::RwLock, v.1, v.2), + } + } +} + +impl TryFrom> for RwLock { + type Error = Arc>; + fn try_from(t: AtomicRw) -> Result, Self::Error> { + Arc::>::try_unwrap(t.inner) + } +} + +impl From>> for AtomicRw { + #[inline] + fn from(t: Arc>) -> Self { + Self { + inner: t, + lock_callback_info: LockCallbackInfo::new(LockType::RwLock, None, None), + } + } +} +impl From<(Arc>, Option, Option)> for AtomicRw { + /// Create from an `Arc>` plus an optional name and + /// an optional callback function, which is called when a lock + /// event occurs. + #[inline] + fn from(v: (Arc>, Option, Option)) -> Self { + Self { + inner: v.0, + lock_callback_info: LockCallbackInfo::new(LockType::RwLock, v.1, v.2), + } + } +} + +impl From> for Arc> { + #[inline] + fn from(t: AtomicRw) -> Self { + t.inner + } +} + +// note: we impl the Atomic trait methods here also so they +// can be used without caller having to use the trait. 
+impl<T> AtomicRw<T> {
+    /// Acquire read lock and return an `RwLockReadGuard`
+    ///
+    /// # Examples
+    /// ```
+    /// # use neptune_core::locks::std::{AtomicRw, traits::*};
+    /// struct Car {
+    ///     year: u16,
+    /// };
+    /// let atomic_car = AtomicRw::from(Car{year: 2016});
+    /// let year = atomic_car.lock_guard().year;
+    /// ```
+    pub fn lock_guard(&self) -> AtomicRwReadGuard<T> {
+        self.try_acquire_read_cb();
+        let guard = self.inner.read().expect("Read lock should succeed");
+        AtomicRwReadGuard::new(guard, &self.lock_callback_info)
+    }
+
+    /// Acquire write lock and return an `RwLockWriteGuard`
+    ///
+    /// # Examples
+    /// ```
+    /// # use neptune_core::locks::std::{AtomicRw, traits::*};
+    /// struct Car {
+    ///     year: u16,
+    /// };
+    /// let mut atomic_car = AtomicRw::from(Car{year: 2016});
+    /// atomic_car.lock_guard_mut().year = 2022;
+    /// ```
+    pub fn lock_guard_mut(&mut self) -> AtomicRwWriteGuard<T> {
+        self.try_acquire_write_cb();
+        let guard = self.inner.write().expect("Write lock should succeed");
+        AtomicRwWriteGuard::new(guard, &self.lock_callback_info)
+    }
+
+    /// Immutably access the data of type `T` in a closure and possibly return a result of type `R`
+    ///
+    /// # Examples
+    /// ```
+    /// # use neptune_core::locks::std::{AtomicRw, traits::*};
+    /// struct Car {
+    ///     year: u16,
+    /// };
+    /// let atomic_car = AtomicRw::from(Car{year: 2016});
+    /// atomic_car.lock(|c| println!("year: {}", c.year));
+    /// let year = atomic_car.lock(|c| c.year);
+    /// ```
+    pub fn lock<R, F>(&self, f: F) -> R
+    where
+        F: FnOnce(&T) -> R,
+    {
+        self.try_acquire_read_cb();
+        let guard = self.inner.read().expect("Read lock should succeed");
+        let my_guard = AtomicRwReadGuard::new(guard, &self.lock_callback_info);
+        f(&my_guard)
+    }
+
+    /// Mutably access the data of type `T` in a closure and possibly return a result of type `R`
+    ///
+    /// # Examples
+    /// ```
+    /// # use neptune_core::locks::std::{AtomicRw, traits::*};
+    /// struct Car {
+    ///     year: u16,
+    /// };
+    /// let mut atomic_car = AtomicRw::from(Car{year: 2016});
+    /// atomic_car.lock_mut(|mut c| {c.year = 2022});
+    /// let year = atomic_car.lock_mut(|mut c| {c.year = 2023; c.year});
+    /// ```
+    pub fn lock_mut<R, F>(&mut self, f: F) -> R
+    where
+        F: FnOnce(&mut T) -> R,
+    {
+        self.try_acquire_write_cb();
+        let guard = self.inner.write().expect("Write lock should succeed");
+        let mut my_guard = AtomicRwWriteGuard::new(guard, &self.lock_callback_info);
+        f(&mut my_guard)
+    }
+
+    /// get copy of the locked value T (if T implements Copy).
+    ///
+    /// # Example
+    /// ```
+    /// # use neptune_core::locks::std::{AtomicRw, traits::*};
+    /// let atomic_u64 = AtomicRw::from(25u64);
+    /// let age = atomic_u64.get();
+    /// ```
+    #[inline]
+    pub fn get(&self) -> T
+    where
+        T: Copy,
+    {
+        self.lock(|v| *v)
+    }
+
+    /// set the locked value T (if T implements Copy).
+    ///
+    /// # Example
+    /// ```
+    /// # use neptune_core::locks::std::{AtomicRw, traits::*};
+    /// let mut atomic_bool = AtomicRw::from(false);
+    /// atomic_bool.set(true);
+    /// ```
+    #[inline]
+    pub fn set(&mut self, value: T)
+    where
+        T: Copy,
+    {
+        self.lock_mut(|v| *v = value)
+    }
+
+    /// retrieve lock name if present, or None
+    #[inline]
+    pub fn name(&self) -> Option<&str> {
+        self.lock_callback_info.lock_info_owned.name.as_deref()
+    }
+
+    fn try_acquire_read_cb(&self) {
+        if let Some(cb) = self.lock_callback_info.lock_callback_fn {
+            cb(LockEvent::TryAcquire {
+                info: self.lock_callback_info.lock_info_owned.as_lock_info(),
+                acquisition: LockAcquisition::Read,
+            });
+        }
+    }
+
+    fn try_acquire_write_cb(&self) {
+        if let Some(cb) = self.lock_callback_info.lock_callback_fn {
+            cb(LockEvent::TryAcquire {
+                info: self.lock_callback_info.lock_info_owned.as_lock_info(),
+                acquisition: LockAcquisition::Write,
+            });
+        }
+    }
+}
+
+impl<T> Atomic<T> for AtomicRw<T> {
+    #[inline]
+    fn lock<R, F>(&self, f: F) -> R
+    where
+        F: FnOnce(&T) -> R,
+    {
+        AtomicRw::<T>::lock(self, f)
+    }
+
+    #[inline]
+    fn lock_mut<R, F>(&mut self, f: F) -> R
+    where
+        F: FnOnce(&mut T) -> R,
+    {
+        AtomicRw::<T>::lock_mut(self, f)
+    }
+}
+
+/// A wrapper for [RwLockReadGuard](std::sync::RwLockReadGuard) that
+/// can optionally call a callback to notify when a
+/// lock event occurs.
+pub struct AtomicRwReadGuard<'a, T> {
+    guard: RwLockReadGuard<'a, T>,
+    lock_callback_info: &'a LockCallbackInfo,
+}
+
+impl<'a, T> AtomicRwReadGuard<'a, T> {
+    fn new(guard: RwLockReadGuard<'a, T>, lock_callback_info: &'a LockCallbackInfo) -> Self {
+        if let Some(cb) = lock_callback_info.lock_callback_fn {
+            cb(LockEvent::Acquire {
+                info: lock_callback_info.lock_info_owned.as_lock_info(),
+                acquisition: LockAcquisition::Read,
+            });
+        }
+        Self {
+            guard,
+            lock_callback_info,
+        }
+    }
+}
+
+impl<'a, T> Drop for AtomicRwReadGuard<'a, T> {
+    fn drop(&mut self) {
+        let lock_callback_info = self.lock_callback_info;
+        if let Some(cb) = lock_callback_info.lock_callback_fn {
+            cb(LockEvent::Release {
+                info: lock_callback_info.lock_info_owned.as_lock_info(),
+                acquisition: LockAcquisition::Read,
+            });
+        }
+    }
+}
+
+impl<'a, T> Deref for AtomicRwReadGuard<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &Self::Target {
+        &self.guard
+    }
+}
+
+/// A wrapper for [RwLockWriteGuard](std::sync::RwLockWriteGuard) that
+/// can optionally call a callback to notify when a
+/// lock event occurs.
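+///
+/// # Example
+/// a small usage sketch (the `Car` type is illustrative only): the guard derefs
+/// to `T`, and dropping it fires the `Release` event when a callback is set.
+/// ```
+/// # use neptune_core::locks::std::AtomicRw;
+/// struct Car {
+///     year: u16,
+/// };
+/// let mut atomic_car = AtomicRw::from(Car{year: 2016});
+/// {
+///     let mut guard = atomic_car.lock_guard_mut(); // AtomicRwWriteGuard<Car>
+///     guard.year = 2023;
+/// } // guard dropped here; a Release event would fire
+/// assert_eq!(atomic_car.lock(|c| c.year), 2023);
+/// ```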
+pub struct AtomicRwWriteGuard<'a, T> {
+    guard: RwLockWriteGuard<'a, T>,
+    lock_callback_info: &'a LockCallbackInfo,
+}
+
+impl<'a, T> AtomicRwWriteGuard<'a, T> {
+    fn new(guard: RwLockWriteGuard<'a, T>, lock_callback_info: &'a LockCallbackInfo) -> Self {
+        if let Some(cb) = lock_callback_info.lock_callback_fn {
+            cb(LockEvent::Acquire {
+                info: lock_callback_info.lock_info_owned.as_lock_info(),
+                acquisition: LockAcquisition::Write,
+            });
+        }
+        Self {
+            guard,
+            lock_callback_info,
+        }
+    }
+}
+
+impl<'a, T> Drop for AtomicRwWriteGuard<'a, T> {
+    fn drop(&mut self) {
+        let lock_callback_info = self.lock_callback_info;
+        if let Some(cb) = lock_callback_info.lock_callback_fn {
+            cb(LockEvent::Release {
+                info: lock_callback_info.lock_info_owned.as_lock_info(),
+                acquisition: LockAcquisition::Write,
+            });
+        }
+    }
+}
+
+impl<'a, T> Deref for AtomicRwWriteGuard<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &Self::Target {
+        &self.guard
+    }
+}
+
+impl<'a, T> DerefMut for AtomicRwWriteGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.guard
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    // Verify (compile-time) that AtomicRw::lock() and ::lock_mut() accept mutable values. (FnMut)
+    fn mutable_assignment() {
+        let name = "Jim".to_string();
+        let mut atomic_name = AtomicRw::from(name);
+
+        let mut new_name: String = Default::default();
+        atomic_name.lock(|n| new_name = n.to_string());
+        atomic_name.lock_mut(|n| new_name = n.to_string());
+    }
+}
diff --git a/src/locks/std/mod.rs b/src/locks/std/mod.rs
new file mode 100644
index 00000000..4ffc341b
--- /dev/null
+++ b/src/locks/std/mod.rs
@@ -0,0 +1,12 @@
+//! Provides simplified lock types for sharing data between threads
+
+mod atomic_mutex;
+mod atomic_rw;
+mod shared;
+pub mod traits;
+
+pub use atomic_mutex::AtomicMutex;
+pub use atomic_rw::{AtomicRw, AtomicRwReadGuard, AtomicRwWriteGuard};
+pub use shared::{LockAcquisition, LockCallbackFn, LockEvent, LockInfo, LockType};
+
+use shared::LockCallbackInfo;
diff --git a/src/util_types/sync/tokio/shared.rs b/src/locks/std/shared.rs
similarity index 100%
rename from src/util_types/sync/tokio/shared.rs
rename to src/locks/std/shared.rs
diff --git a/src/locks/std/traits.rs b/src/locks/std/traits.rs
new file mode 100644
index 00000000..d5ea7a80
--- /dev/null
+++ b/src/locks/std/traits.rs
@@ -0,0 +1,67 @@
+//! Traits that define the [`locks`](crate::locks) interface
+
+pub trait Atomic<T> {
+    /// Immutably access the data of type `T` in a closure and possibly return a result of type `R`
+    ///
+    /// # Example
+    /// ```
+    /// # use neptune_core::locks::std::{AtomicRw, traits::*};
+    /// struct Car {
+    ///     year: u16,
+    /// };
+    /// let atomic_car = AtomicRw::from(Car{year: 2016});
+    /// atomic_car.lock(|c| {println!("year: {}", c.year); });
+    /// let year = atomic_car.lock(|c| c.year);
+    /// ```
+    fn lock<R, F>(&self, f: F) -> R
+    where
+        F: FnOnce(&T) -> R;
+
+    /// Mutably access the data of type `T` in a closure and possibly return a result of type `R`
+    ///
+    /// # Example
+    /// ```
+    /// # use neptune_core::locks::std::{AtomicRw, traits::*};
+    /// struct Car {
+    ///     year: u16,
+    /// };
+    /// let mut atomic_car = AtomicRw::from(Car{year: 2016});
+    /// atomic_car.lock_mut(|mut c| {c.year = 2022;});
+    /// let year = atomic_car.lock_mut(|mut c| {c.year = 2023; c.year});
+    /// ```
+    fn lock_mut<R, F>(&mut self, f: F) -> R
+    where
+        F: FnOnce(&mut T) -> R;
+
+    /// get copy of the locked value T (if T implements Copy).
+ /// + /// # Example + /// ``` + /// # use neptune_core::locks::std::{AtomicRw, traits::*}; + /// let atomic_u64 = AtomicRw::from(25u64); + /// let age = atomic_u64.get(); + /// ``` + #[inline] + fn get(&self) -> T + where + T: Copy, + { + self.lock(|v| *v) + } + + /// set the locked value T (if T implements Copy). + /// + /// # Example + /// ``` + /// # use neptune_core::locks::std::{AtomicRw, traits::*}; + /// let mut atomic_bool = AtomicRw::from(false); + /// atomic_bool.set(true); + /// ``` + #[inline] + fn set(&mut self, value: T) + where + T: Copy, + { + self.lock_mut(|v| *v = value) + } +} diff --git a/src/util_types/sync/tokio/atomic_mutex.rs b/src/locks/tokio/atomic_mutex.rs similarity index 96% rename from src/util_types/sync/tokio/atomic_mutex.rs rename to src/locks/tokio/atomic_mutex.rs index 4768e07d..72c4fe82 100644 --- a/src/util_types/sync/tokio/atomic_mutex.rs +++ b/src/locks/tokio/atomic_mutex.rs @@ -8,7 +8,7 @@ use tokio::sync::{Mutex, MutexGuard}; /// /// # Examples /// ``` -/// # use neptune_core::util_types::sync::tokio::AtomicMutex; +/// # use neptune_core::locks::tokio::AtomicMutex; /// struct Car { /// year: u16, /// }; @@ -25,7 +25,7 @@ use tokio::sync::{Mutex, MutexGuard}; /// /// # Examples /// ``` -/// # use neptune_core::util_types::sync::tokio::{AtomicMutex, LockEvent, LockCallbackFn}; +/// # use neptune_core::locks::tokio::{AtomicMutex, LockEvent, LockCallbackFn}; /// struct Car { /// year: u16, /// }; @@ -198,7 +198,7 @@ impl AtomicMutex { /// /// # Examples /// ``` - /// # use neptune_core::util_types::sync::tokio::AtomicMutex; + /// # use neptune_core::locks::tokio::AtomicMutex; /// struct Car { /// year: u16, /// }; @@ -217,7 +217,7 @@ impl AtomicMutex { /// /// # Examples /// ``` - /// # use neptune_core::util_types::sync::tokio::AtomicMutex; + /// # use neptune_core::locks::tokio::AtomicMutex; /// struct Car { /// year: u16, /// }; @@ -236,7 +236,7 @@ impl AtomicMutex { /// /// # Examples /// ``` - /// # use neptune_core::util_types::sync::tokio::AtomicMutex; + /// # use neptune_core::locks::tokio::AtomicMutex; /// struct Car { /// year: u16, /// }; @@ -261,7 +261,7 @@ impl AtomicMutex { /// /// # Examples /// ``` - /// # use neptune_core::util_types::sync::tokio::AtomicMutex; + /// # use neptune_core::locks::tokio::AtomicMutex; /// struct Car { /// year: u16, /// }; @@ -292,7 +292,7 @@ impl AtomicMutex { /// /// # Examples /// ``` - /// # use neptune_core::util_types::sync::tokio::AtomicMutex; + /// # use neptune_core::locks::tokio::AtomicMutex; /// # use futures::future::FutureExt; /// struct Car { /// year: u16, @@ -319,7 +319,7 @@ impl AtomicMutex { /// /// # Examples /// ``` - /// # use neptune_core::util_types::sync::tokio::AtomicMutex; + /// # use neptune_core::locks::tokio::AtomicMutex; /// # use futures::future::FutureExt; /// struct Car { /// year: u16, diff --git a/src/util_types/sync/tokio/atomic_rw.rs b/src/locks/tokio/atomic_rw.rs similarity index 96% rename from src/util_types/sync/tokio/atomic_rw.rs rename to src/locks/tokio/atomic_rw.rs index 9f2adead..67b1ace0 100644 --- a/src/util_types/sync/tokio/atomic_rw.rs +++ b/src/locks/tokio/atomic_rw.rs @@ -8,7 +8,7 @@ use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; /// /// # Examples /// ``` -/// # use neptune_core::util_types::sync::tokio::AtomicRw; +/// # use neptune_core::locks::tokio::AtomicRw; /// struct Car { /// year: u16, /// }; @@ -24,7 +24,7 @@ use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; /// /// # Examples /// ``` -/// # use 
neptune_core::util_types::sync::tokio::{AtomicRw, LockEvent, LockCallbackFn}; +/// # use neptune_core::locks::tokio::{AtomicRw, LockEvent, LockCallbackFn}; /// struct Car { /// year: u16, /// }; @@ -197,7 +197,7 @@ impl AtomicRw { /// /// # Examples /// ``` - /// # use neptune_core::util_types::sync::tokio::AtomicRw; + /// # use neptune_core::locks::tokio::AtomicRw; /// struct Car { /// year: u16, /// }; @@ -216,7 +216,7 @@ impl AtomicRw { /// /// # Examples /// ``` - /// # use neptune_core::util_types::sync::tokio::AtomicRw; + /// # use neptune_core::locks::tokio::AtomicRw; /// struct Car { /// year: u16, /// }; @@ -235,7 +235,7 @@ impl AtomicRw { /// /// # Examples /// ``` - /// # use neptune_core::util_types::sync::tokio::AtomicRw; + /// # use neptune_core::locks::tokio::AtomicRw; /// struct Car { /// year: u16, /// }; @@ -259,7 +259,7 @@ impl AtomicRw { /// /// # Examples /// ``` - /// # use neptune_core::util_types::sync::tokio::AtomicRw; + /// # use neptune_core::locks::tokio::AtomicRw; /// struct Car { /// year: u16, /// }; @@ -286,7 +286,7 @@ impl AtomicRw { /// /// # Examples /// ``` - /// # use neptune_core::util_types::sync::tokio::AtomicRw; + /// # use neptune_core::locks::tokio::AtomicRw; /// # use futures::future::FutureExt; /// struct Car { /// year: u16, @@ -312,7 +312,7 @@ impl AtomicRw { /// /// # Examples /// ``` - /// # use neptune_core::util_types::sync::tokio::AtomicRw; + /// # use neptune_core::locks::tokio::AtomicRw; /// # use futures::future::FutureExt; /// struct Car { /// year: u16, @@ -331,6 +331,12 @@ impl AtomicRw { f(&mut guard).await } + /// retrieve lock name if present, or None + #[inline] + pub fn name(&self) -> Option<&str> { + self.lock_callback_info.lock_info_owned.name.as_deref() + } + fn try_acquire_read_cb(&self) { if let Some(cb) = self.lock_callback_info.lock_callback_fn { cb(LockEvent::TryAcquire { diff --git a/src/util_types/sync/tokio/mod.rs b/src/locks/tokio/mod.rs similarity index 100% rename from src/util_types/sync/tokio/mod.rs rename to src/locks/tokio/mod.rs diff --git a/src/locks/tokio/shared.rs b/src/locks/tokio/shared.rs new file mode 100644 index 00000000..26e27dcd --- /dev/null +++ b/src/locks/tokio/shared.rs @@ -0,0 +1,108 @@ +/// Indicates the lock's underlying type +#[derive(Debug, Clone, Copy)] +pub enum LockType { + Mutex, + RwLock, +} + +impl std::fmt::Display for LockType { + #[inline] + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Mutex => write!(f, "Mutex"), + Self::RwLock => write!(f, "RwLock"), + } + } +} + +/// Indicates how a lock was acquired. 
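+/// # Example
+/// a tiny sketch of the `Display` impl below (assuming `LockAcquisition` is
+/// re-exported from `locks::tokio`, mirroring the std module's re-exports):
+/// ```
+/// # use neptune_core::locks::tokio::LockAcquisition;
+/// assert_eq!(LockAcquisition::Read.to_string(), "Read");
+/// ```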
+#[derive(Debug, Clone, Copy)]
+pub enum LockAcquisition {
+    Read,
+    Write,
+}
+
+impl std::fmt::Display for LockAcquisition {
+    #[inline]
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::Read => write!(f, "Read"),
+            Self::Write => write!(f, "Write"),
+        }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub(super) struct LockInfoOwned {
+    pub name: Option<String>,
+    pub lock_type: LockType,
+}
+impl LockInfoOwned {
+    #[inline]
+    pub fn as_lock_info(&self) -> LockInfo<'_> {
+        LockInfo {
+            name: self.name.as_deref(),
+            lock_type: self.lock_type,
+        }
+    }
+}
+
+/// Contains metadata about a lock
+#[derive(Debug, Clone)]
+pub struct LockInfo<'a> {
+    name: Option<&'a str>,
+    lock_type: LockType,
+}
+impl<'a> LockInfo<'a> {
+    /// get the lock's name
+    #[inline]
+    pub fn name(&self) -> Option<&str> {
+        self.name
+    }
+
+    /// get the lock's type
+    #[inline]
+    pub fn lock_type(&self) -> LockType {
+        self.lock_type
+    }
+}
+
+#[derive(Debug, Clone)]
+pub(super) struct LockCallbackInfo {
+    pub lock_info_owned: LockInfoOwned,
+    pub lock_callback_fn: Option<LockCallbackFn>,
+}
+impl LockCallbackInfo {
+    #[inline]
+    pub fn new(
+        lock_type: LockType,
+        name: Option<String>,
+        lock_callback_fn: Option<LockCallbackFn>,
+    ) -> Self {
+        Self {
+            lock_info_owned: LockInfoOwned { name, lock_type },
+            lock_callback_fn,
+        }
+    }
+}
+
+/// Represents an event (acquire/release) for a lock
+#[derive(Debug, Clone)]
+pub enum LockEvent<'a> {
+    TryAcquire {
+        info: LockInfo<'a>,
+        acquisition: LockAcquisition,
+    },
+    Acquire {
+        info: LockInfo<'a>,
+        acquisition: LockAcquisition,
+    },
+    Release {
+        info: LockInfo<'a>,
+        acquisition: LockAcquisition,
+    },
+}
+
+/// A callback fn for receiving a [LockEvent]
+/// each time a lock is acquired or released.
+pub type LockCallbackFn = fn(lock_event: LockEvent);
diff --git a/src/util_types/sync/tokio/traits.rs b/src/locks/tokio/traits.rs
similarity index 89%
rename from src/util_types/sync/tokio/traits.rs
rename to src/locks/tokio/traits.rs
index f786d377..2aed44b8 100644
--- a/src/util_types/sync/tokio/traits.rs
+++ b/src/locks/tokio/traits.rs
@@ -10,7 +10,7 @@ pub trait Atomic<T> {
     ///
     /// # Example
     /// ```
-    /// # use neptune_core::util_types::sync::tokio::{AtomicRw, traits::*};
+    /// # use neptune_core::locks::tokio::{AtomicRw, traits::*};
     /// struct Car {
     ///     year: u16,
     /// };
@@ -28,7 +28,7 @@ pub trait Atomic<T> {
     ///
     /// # Example
     /// ```
-    /// # use neptune_core::util_types::sync::tokio::{AtomicRw, traits::*};
+    /// # use neptune_core::locks::tokio::{AtomicRw, traits::*};
     /// struct Car {
     ///     year: u16,
     /// };
diff --git a/src/macros.rs b/src/macros.rs
new file mode 100644
index 00000000..d6e69d2d
--- /dev/null
+++ b/src/macros.rs
@@ -0,0 +1,210 @@
+/// executes an expression, times duration, and emits trace! message
+///
+/// The trace level is `tracing::Level::TRACE` by default.
+///
+/// Accepts arguments in 3 forms:
+///   duration!(myfunc())
+///   duration!(myfunc(), message)
+///   duration!(myfunc(), message, trace_level)
+#[allow(unused_macros)]
+macro_rules! duration {
+    ($target: expr, $message: expr, $lvl: expr) => {{
+        let (output, duration) = $crate::time_fn_call(|| $target);
+        let msg = format!(
+            "at {}:{}{}\n-- executed expression --\n{}\n -- duration: {} secs --",
+            file!(),
+            line!(),
+            if $message.len() > 0 {
+                format! {"\n{}", $message}
+            } else {
+                "".to_string()
+            },
+            stringify!($target),
+            duration
+        );
+        match $lvl {
+            tracing::Level::INFO => tracing::info!("{}", msg),
+            tracing::Level::TRACE => tracing::trace!("{}", msg),
+            tracing::Level::DEBUG => tracing::debug!("{}", msg),
+            tracing::Level::WARN => tracing::warn!("{}", msg),
+            tracing::Level::ERROR => tracing::error!("{}", msg),
+        }
+        output
+    }};
+    ($target: expr, $message: expr) => {
+        $crate::macros::duration!($target, $message, tracing::Level::TRACE)
+    };
+    ($target: expr) => {
+        $crate::macros::duration!($target, "", tracing::Level::TRACE)
+    };
+}
+
+/// executes an expression, times duration, and emits info! message
+///
+/// Accepts arguments in 2 forms:
+///   duration_info!(myfunc())
+///   duration_info!(myfunc(), message)
+#[allow(unused_macros)]
+macro_rules! duration_info {
+    ($target: expr) => {
+        $crate::macros::duration!($target, "", tracing::Level::INFO)
+    };
+    ($target: expr, $message: expr) => {
+        $crate::macros::duration!($target, $message, tracing::Level::INFO)
+    };
+}
+
+/// executes an expression, times duration, and emits debug! message
+///
+/// Accepts arguments in 2 forms:
+///   duration_debug!(myfunc())
+///   duration_debug!(myfunc(), message)
+#[allow(unused_macros)]
+macro_rules! duration_debug {
+    ($target: expr) => {
+        $crate::macros::duration!($target, "", tracing::Level::DEBUG)
+    };
+    ($target: expr, $message: expr) => {
+        $crate::macros::duration!($target, $message, tracing::Level::DEBUG)
+    };
+}
+
+/// executes an async expression, times duration, and emits trace! message
+///
+/// Accepts arguments in 3 forms:
+///   duration_async!(myfunc())
+///   duration_async!(myfunc(), message)
+///   duration_async!(myfunc(), message, trace_level)
+#[allow(unused_macros)]
+macro_rules! duration_async {
+    ($target: expr, $message: expr, $lvl: expr) => {{
+        let (output, duration) = $crate::time_fn_call_async({ $target }).await;
+        let msg = format!(
+            "at {}:{}{}\n-- executed expression --\n{}\n -- duration: {} secs --",
+            file!(),
+            line!(),
+            if $message.len() > 0 {
+                format! {"\n{}", $message}
+            } else {
+                "".to_string()
+            },
+            stringify!($target),
+            duration
+        );
+        match $lvl {
+            tracing::Level::INFO => tracing::info!("{}", msg),
+            tracing::Level::TRACE => tracing::trace!("{}", msg),
+            tracing::Level::DEBUG => tracing::debug!("{}", msg),
+            tracing::Level::WARN => tracing::warn!("{}", msg),
+            tracing::Level::ERROR => tracing::error!("{}", msg),
+        }
+        output
+    }};
+    ($target: expr, $message: expr) => {
+        $crate::macros::duration_async!($target, $message, tracing::Level::TRACE)
+    };
+    ($target: expr) => {
+        $crate::macros::duration_async!($target, "", tracing::Level::TRACE)
+    };
+}
+
+/// executes an async expression, times duration, and emits info! message
+///
+/// Accepts arguments in 2 forms:
+///   duration_async_info!(myfunc())
+///   duration_async_info!(myfunc(), message)
+#[allow(unused_macros)]
+macro_rules! duration_async_info {
+    ($target: expr) => {
+        $crate::macros::duration_async!($target, "", tracing::Level::INFO)
+    };
+    ($target: expr, $message: expr) => {
+        $crate::macros::duration_async!($target, $message, tracing::Level::INFO)
+    };
+}
+
+/// executes an async expression, times duration, and emits debug! message
+///
+/// Accepts arguments in 2 forms:
+///   duration_async_debug!(myfunc())
+///   duration_async_debug!(myfunc(), message)
+#[allow(unused_macros)]
+macro_rules!
duration_async_debug { + ($target: expr) => { + $crate::macros::duration_async!($target, "", tracing::Level::DEBUG) + }; + ($target: expr, $message: expr) => { + $crate::macros::duration_async!($target, $message, tracing::Level::DEBUG) + }; +} + +// These allow the macros to be used as +// use crate::macros::xxxxx; +// +// see: https://stackoverflow.com/a/67140319/10087197 + +#[allow(unused_imports)] +pub(crate) use duration; +#[allow(unused_imports)] +pub(crate) use duration_async; +#[allow(unused_imports)] +pub(crate) use duration_async_debug; +#[allow(unused_imports)] +pub(crate) use duration_async_info; +#[allow(unused_imports)] +pub(crate) use duration_debug; +#[allow(unused_imports)] +pub(crate) use duration_info; + +#[cfg(test)] +mod test { + + use super::*; + use tracing::Level; + + fn fibonacci(n: u32) -> u32 { + match n { + 0 => 1, + 1 => 1, + _ => fibonacci(n - 1) + fibonacci(n - 2), + } + } + + async fn fibonacci_async(n: u32) -> u32 { + match n { + 0 => 1, + 1 => 1, + _ => fibonacci(n - 1) + fibonacci(n - 2), + } + } + + #[test] + fn duration_tests() { + duration!(fibonacci(1)); + duration!(fibonacci(2), "fibonacci - 2".to_string()); + duration!(fibonacci(3), "fibonacci - 3", Level::INFO); + + duration_info!(fibonacci(4)); + duration_info!(fibonacci(5), "fibonacci - 5"); + duration_info!(fibonacci(6), "fibonacci - 6".to_string()); + + duration_debug!(fibonacci(7)); + duration_debug!(fibonacci(8), "fibonacci - 8"); + duration_debug!(fibonacci(9), "fibonacci - 9".to_string()); + } + + #[tokio::test] + async fn duration_async_tests() { + duration_async!(fibonacci_async(1)); + duration_async!(fibonacci_async(2), "fibonacci_async - 2".to_string()); + duration_async!(fibonacci_async(3), "fibonacci_async - 3", Level::INFO); + + duration_async_info!(fibonacci_async(4)); + duration_async_info!(fibonacci_async(5), "fibonacci_async - 5"); + duration_async_info!(fibonacci_async(6), "fibonacci_async - 6".to_string()); + + duration_async_debug!(fibonacci_async(7)); + duration_async_debug!(fibonacci_async(8), "fibonacci_async - 8"); + duration_async_debug!(fibonacci_async(9), "fibonacci_async - 9".to_string()); + } +} diff --git a/src/mine_loop.rs b/src/mine_loop.rs index e398f766..b5f73a14 100644 --- a/src/mine_loop.rs +++ b/src/mine_loop.rs @@ -19,8 +19,8 @@ use crate::models::state::wallet::utxo_notification_pool::{ExpectedUtxo, UtxoNot use crate::models::state::wallet::WalletSecret; use crate::models::state::{GlobalState, GlobalStateLock}; use crate::prelude::twenty_first; +use crate::util_types::mutator_set::commit; use crate::util_types::mutator_set::mutator_set_accumulator::MutatorSetAccumulator; -use crate::util_types::mutator_set::mutator_set_trait::{commit, MutatorSet}; use anyhow::{Context, Result}; use futures::channel::oneshot; use num_traits::identities::Zero; @@ -156,7 +156,11 @@ async fn mine_block( ); let new_block_info = NewBlockFound { - block: Box::new(Block::new(block_header, block_body, None)), + block: Box::new(Block::new( + block_header, + block_body, + Block::mk_std_block_type(None), + )), coinbase_utxo_info: Box::new(coinbase_utxo_info), }; @@ -488,7 +492,7 @@ mod mine_loop_tests { let block_template_empty_mempool = Block::new( block_header_template_empty_mempool, block_body_empty_mempool, - None, + Block::mk_std_block_type(None), ); assert!( block_template_empty_mempool.is_valid(&genesis_block, now), @@ -544,7 +548,11 @@ mod mine_loop_tests { transaction_non_empty_mempool, now + Timestamp::months(7), ); - let block_template_non_empty_mempool = 
Block::new(block_header_template, block_body, None);
+        let block_template_non_empty_mempool = Block::new(
+            block_header_template,
+            block_body,
+            Block::mk_std_block_type(None),
+        );
         assert!(
             block_template_non_empty_mempool.is_valid(
                 &genesis_block,
diff --git a/src/models/blockchain/block/mod.rs b/src/models/blockchain/block/mod.rs
index 75453c9e..6ce3342c 100644
--- a/src/models/blockchain/block/mod.rs
+++ b/src/models/blockchain/block/mod.rs
@@ -39,7 +39,7 @@ use self::block_header::{
 use self::block_height::BlockHeight;
 use self::block_kernel::BlockKernel;
 use self::mutator_set_update::MutatorSetUpdate;
-use self::transfer_block::TransferBlock;
+use self::transfer_block::{ProofType, TransferBlock};
 use super::transaction::transaction_kernel::TransactionKernel;
 use super::transaction::utxo::Utxo;
 use super::transaction::validity::TransactionValidationLogic;
@@ -49,18 +49,23 @@ use super::type_scripts::time_lock::TimeLock;
 use crate::models::blockchain::shared::Hash;
 use crate::models::state::wallet::address::generation_address::{self, ReceivingAddress};
 use crate::models::state::wallet::WalletSecret;
+use crate::util_types::mutator_set::commit;
 use crate::util_types::mutator_set::mutator_set_accumulator::MutatorSetAccumulator;
-use crate::util_types::mutator_set::mutator_set_trait::{commit, MutatorSet};
+
+/// All blocks have proofs except the genesis block
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, BFieldCodec, GetSize)]
+pub enum BlockType {
+    Genesis,
+    Standard(ProofType),
+}
 
 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, BFieldCodec, GetSize)]
 pub struct Block {
     /// Everything but the proof
     pub kernel: BlockKernel,
 
-    /// All blocks have proofs except:
-    ///   - the genesis block
-    ///   - blocks being generated
-    pub proof: Option<Proof>,
+    /// type of block: Genesis, or Standard
+    pub block_type: BlockType,
 }
 
 impl From<TransferBlock> for Block {
@@ -70,24 +75,24 @@ impl From<TransferBlock> for Block {
                 header: t_block.header,
                 body: t_block.body,
             },
-            proof: Some(t_block.proof),
+            block_type: BlockType::Standard(t_block.proof_type),
         }
     }
 }
 
 impl From<Block> for TransferBlock {
     fn from(block: Block) -> Self {
-        let proof = match block.proof {
-            Some(p) => p,
-            None => {
-                error!("In order to be transferred, a Block must have a non-None proof field.");
+        let proof_type = match block.block_type {
+            BlockType::Standard(pt) => pt,
+            BlockType::Genesis => {
+                error!("The Genesis block cannot be transferred");
                 panic!()
             }
         };
         Self {
             header: block.kernel.header,
             body: block.kernel.body,
-            proof,
+            proof_type,
         }
     }
 }
@@ -142,7 +147,7 @@ impl Block {
                 timestamp: network.launch_date(),
                 public_announcements: vec![],
                 coinbase: Some(total_premine_amount),
-                mutator_set_hash: MutatorSetAccumulator::new().hash(),
+                mutator_set_hash: MutatorSetAccumulator::default().hash(),
             },
             witness: TransactionValidationLogic {
                 vast: ValidityTree {
@@ -197,7 +202,7 @@ impl Block {
             difficulty: MINIMUM_DIFFICULTY.into(),
         };
 
-        Self::new(header, body, None)
+        Self::new(header, body, BlockType::Genesis)
     }
 
     fn premine_distribution(
@@ -230,16 +235,30 @@ impl Block {
         utxos
     }
 
-    pub fn new(header: BlockHeader, body: BlockBody, proof: Option<Proof>) -> Self {
+    pub fn new(header: BlockHeader, body: BlockBody, block_type: BlockType) -> Self {
         Self {
             kernel: BlockKernel { body, header },
-            proof,
+            block_type,
         }
     }
 
+    /// helper fn to generate a BlockType::Standard enum variant representing a standard Block (non-genesis).
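+    ///
+    /// # Example
+    /// an illustrative sketch of the mapping (no real `Proof` at hand, so `None`):
+    /// ```text
+    /// let block_type = Block::mk_std_block_type(None);
+    /// assert!(matches!(block_type, BlockType::Standard(ProofType::Unimplemented)));
+    /// ```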
+    ///
+    /// note: This consolidates creation of ProofType::Unimplemented
+    /// into one place, so that once all Proofs are implemented we
+    /// can easily remove ProofType::Unimplemented. We will
+    /// still need to make this proof param non-optional though.
+    pub fn mk_std_block_type(proof: Option<Proof>) -> BlockType {
+        let proof_type = match proof {
+            Some(p) => ProofType::Proof(p),
+            None => ProofType::Unimplemented,
+        };
+        BlockType::Standard(proof_type)
+    }
+
     /// Merge a transaction into this block's transaction.
     /// The mutator set data must be valid in all inputs.
-    pub fn accumulate_transaction(
+    pub async fn accumulate_transaction(
         &mut self,
         transaction: Transaction,
         previous_mutator_set_accumulator: &MutatorSetAccumulator,
@@ -380,7 +399,6 @@ impl Block {
             .kernel
             .body
             .mutator_set_accumulator
-            .kernel
             .can_remove(removal_record)
         {
             warn!("Removal record cannot be removed from mutator set");
@@ -550,21 +568,17 @@ mod block_tests {
     use crate::{
         config_models::network::Network,
+        database::storage::storage_schema::SimpleRustyStorage,
+        database::NeptuneLevelDb,
         models::{
            blockchain::transaction::PublicAnnouncement, state::wallet::WalletSecret,
            state::UtxoReceiverData,
         },
         tests::shared::{get_mock_global_state, make_mock_block, make_mock_block_with_valid_pow},
+        util_types::mutator_set::archival_mmr::ArchivalMmr,
     };
     use strum::IntoEnumIterator;
-    use tasm_lib::twenty_first::{
-        storage::level_db::DB,
-        util_types::{
-            emojihash_trait::Emojihash,
-            mmr::archival_mmr::ArchivalMmr,
-            storage_schema::{SimpleRustyStorage, StorageWriter},
-        },
-    };
+    use tasm_lib::twenty_first::util_types::emojihash_trait::Emojihash;
 
     use super::*;
 
@@ -621,7 +635,9 @@
         .unwrap();
         assert!(new_tx.is_valid(), "Created tx must be valid");
 
-        block_1.accumulate_transaction(new_tx, &genesis_block.kernel.body.mutator_set_accumulator);
+        block_1
+            .accumulate_transaction(new_tx, &genesis_block.kernel.body.mutator_set_accumulator)
+            .await;
         assert!(
             block_1.is_valid(&genesis_block, now + seven_months),
             "Block 1 must be valid after adding a transaction; previous mutator set hash: {} and next mutator set hash: {}",
@@ -733,19 +749,20 @@
         assert!(!block_1.is_valid(&genesis_block, now));
     }
 
-    #[test]
-    fn can_prove_block_ancestry() {
+    #[tokio::test]
+    async fn can_prove_block_ancestry() {
         let mut rng = thread_rng();
         let network = Network::RegTest;
         let genesis_block = Block::genesis_block(network);
         let mut blocks = vec![];
         blocks.push(genesis_block.clone());
-        let db = DB::open_new_test_database(true, None, None, None).unwrap();
+        let db = NeptuneLevelDb::open_new_test_database(true, None, None, None)
+            .await
+            .unwrap();
         let mut storage = SimpleRustyStorage::new(db);
-        storage.restore_or_new();
-        let ammr_storage = storage.schema.new_vec::<Digest>("ammr-blocks-0");
-        let mut ammr: ArchivalMmr<Hash, _> = ArchivalMmr::new(ammr_storage);
-        ammr.append(genesis_block.hash());
+        let ammr_storage = storage.schema.new_vec::<Digest>("ammr-blocks-0").await;
+        let mut ammr: ArchivalMmr<Hash, _> = ArchivalMmr::new(ammr_storage).await;
+        ammr.append(genesis_block.hash()).await;
         let mut mmra = MmrAccumulator::new(vec![genesis_block.hash()]);
 
         for i in 0..55 {
@@ -754,9 +771,12 @@
             let (new_block, _, _) =
                 make_mock_block(blocks.last().unwrap(), None, recipient_address, rng.gen());
             if i != 54 {
-                ammr.append(new_block.hash());
+                ammr.append(new_block.hash()).await;
                 mmra.append(new_block.hash());
-                assert_eq!(ammr.to_accumulator().bag_peaks(), mmra.bag_peaks());
+                assert_eq!(
+                    ammr.to_accumulator_async().await.bag_peaks(),
+                    mmra.bag_peaks()
+                );
             }
             blocks.push(new_block);
         }
@@ -766,7 +786,7 @@
         let index = thread_rng().gen_range(0..blocks.len() - 1);
         let block_digest = blocks[index].hash();
 
-        let (membership_proof, _) = ammr.prove_membership(index as u64);
+        let membership_proof = ammr.prove_membership_async(index as u64).await;
         let (v, _) = membership_proof.verify(
             &last_block_mmra.get_peaks(),
             block_digest,
diff --git a/src/models/blockchain/block/mutator_set_update.rs b/src/models/blockchain/block/mutator_set_update.rs
index b54f040c..236e570a 100644
--- a/src/models/blockchain/block/mutator_set_update.rs
+++ b/src/models/blockchain/block/mutator_set_update.rs
@@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize};
 
 use crate::util_types::mutator_set::{
     addition_record::AdditionRecord, mutator_set_accumulator::MutatorSetAccumulator,
-    mutator_set_trait::MutatorSet, removal_record::RemovalRecord,
+    removal_record::RemovalRecord,
 };
 
 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
@@ -42,12 +42,9 @@ impl MutatorSetUpdate {
         let mut cloned_removals = self.removals.clone();
         let mut applied_removal_records = cloned_removals.iter_mut().rev().collect::<Vec<_>>();
         for addition_record in self.additions.iter() {
-            RemovalRecord::batch_update_from_addition(
-                &mut applied_removal_records,
-                &mut ms_accumulator.kernel,
-            );
+            RemovalRecord::batch_update_from_addition(&mut applied_removal_records, ms_accumulator);
 
-            RemovalRecord::batch_update_from_addition(removal_records, &mut ms_accumulator.kernel);
+            RemovalRecord::batch_update_from_addition(removal_records, ms_accumulator);
 
             ms_accumulator.add(addition_record);
         }
diff --git a/src/models/blockchain/block/transfer_block.rs b/src/models/blockchain/block/transfer_block.rs
index 4e8a7028..679afe32 100644
--- a/src/models/blockchain/block/transfer_block.rs
+++ b/src/models/blockchain/block/transfer_block.rs
@@ -1,7 +1,15 @@
+use get_size::GetSize;
 use serde::{Deserialize, Serialize};
 use tasm_lib::triton_vm::proof::Proof;
 
 use super::{block_body::BlockBody, block_header::BlockHeader};
+use crate::models::blockchain::block::BFieldCodec;
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Eq, BFieldCodec, GetSize)]
+pub enum ProofType {
+    Unimplemented, // temporary; should be removed once all Proofs are implemented.
+    Proof(Proof),
+}
 
 /// Data structure for communicating blocks with peers. The hash digest is not
 /// communicated such that the receiver is forced to calculate it themselves.
@@ -9,5 +17,5 @@ use super::{block_body::BlockBody, block_header::BlockHeader}; pub struct TransferBlock { pub header: BlockHeader, pub body: BlockBody, - pub proof: Proof, + pub proof_type: ProofType, } diff --git a/src/models/blockchain/transaction/mod.rs b/src/models/blockchain/transaction/mod.rs index 37c1ce29..4e539f3b 100644 --- a/src/models/blockchain/transaction/mod.rs +++ b/src/models/blockchain/transaction/mod.rs @@ -35,7 +35,6 @@ use super::type_scripts::TypeScript; use crate::util_types::mutator_set::addition_record::AdditionRecord; use crate::util_types::mutator_set::ms_membership_proof::MsMembershipProof; use crate::util_types::mutator_set::mutator_set_accumulator::MutatorSetAccumulator; -use crate::util_types::mutator_set::mutator_set_trait::MutatorSet; use crate::util_types::mutator_set::removal_record::RemovalRecord; #[derive( @@ -95,16 +94,10 @@ impl Transaction { // Apply all addition records in the block for block_addition_record in block_addition_records { // Batch update block's removal records to keep them valid after next addition - RemovalRecord::batch_update_from_addition( - &mut block_removal_records, - &mut msa_state.kernel, - ); + RemovalRecord::batch_update_from_addition(&mut block_removal_records, &msa_state); // Batch update transaction's removal records - RemovalRecord::batch_update_from_addition( - &mut transaction_removal_records, - &mut msa_state.kernel, - ); + RemovalRecord::batch_update_from_addition(&mut transaction_removal_records, &msa_state); // Batch update primitive witness membership proofs let membership_proofs = &mut primitive_witness @@ -120,7 +113,7 @@ impl Transaction { MsMembershipProof::batch_update_from_addition( membership_proofs, &own_items, - &msa_state.kernel, + &msa_state, &block_addition_record, ) .expect("MS MP update from add must succeed in wallet handler"); @@ -405,12 +398,12 @@ impl Transaction { self.kernel .inputs .iter() - .all(|rr| rr.validate(&mutator_set_accumulator.kernel)) + .all(|rr| rr.validate(mutator_set_accumulator)) } /// Verify the transaction directly from the primitive witness, without proofs or /// decomposing into subclaims. 
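+    ///
+    /// # Example
+    /// an illustrative call site (names are hypothetical; the method becomes
+    /// async below and must be awaited):
+    /// ```text
+    /// let ok = transaction.validate_primitive_witness(&primitive_witness).await;
+    /// assert!(ok);
+    /// ```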
- pub fn validate_primitive_witness(&self, primitive_witness: &PrimitiveWitness) -> bool { + pub async fn validate_primitive_witness(&self, primitive_witness: &PrimitiveWitness) -> bool { // verify lock scripts for (lock_script, secret_input) in primitive_witness .input_lock_scripts @@ -614,7 +607,7 @@ mod witness_tests { lock_script_witnesses: vec![], input_membership_proofs: vec![], output_utxos: SaltedUtxos::empty(), - mutator_set_accumulator: MutatorSetAccumulator::new(), + mutator_set_accumulator: MutatorSetAccumulator::default(), kernel: empty_kernel, }; @@ -636,7 +629,7 @@ mod transaction_tests { blockchain::type_scripts::neptune_coins::NeptuneCoins, consensus::timestamp::Timestamp, }, tests::shared::make_mock_transaction, - util_types::mutator_set::mutator_set_trait::commit, + util_types::mutator_set::commit, }; #[traced_test] diff --git a/src/models/blockchain/transaction/primitive_witness.rs b/src/models/blockchain/transaction/primitive_witness.rs index bfb4c05e..bcdcae58 100644 --- a/src/models/blockchain/transaction/primitive_witness.rs +++ b/src/models/blockchain/transaction/primitive_witness.rs @@ -18,23 +18,23 @@ use tasm_lib::{ Digest, }; -use crate::models::{ - blockchain::type_scripts::{native_currency::NativeCurrency, neptune_coins::NeptuneCoins}, - consensus::{tasm::program::ConsensusProgram, timestamp::Timestamp}, -}; use crate::{ - models::{blockchain::type_scripts::TypeScript, state::wallet::address::generation_address}, + models::{ + blockchain::type_scripts::TypeScript, consensus::timestamp::Timestamp, + state::wallet::address::generation_address, + }, util_types::mutator_set::{ ms_membership_proof::MsMembershipProof, mutator_set_accumulator::MutatorSetAccumulator, }, }; use crate::{ - util_types::mutator_set::{ - msa_and_records::MsaAndRecords, - mutator_set_trait::{commit, MutatorSet}, + models::{ + blockchain::type_scripts::{native_currency::NativeCurrency, neptune_coins::NeptuneCoins}, + consensus::tasm::program::ConsensusProgram, }, - Hash, + util_types::mutator_set::commit, }; +use crate::{util_types::mutator_set::msa_and_records::MsaAndRecords, Hash}; use super::{ transaction_kernel::TransactionKernel, diff --git a/src/models/blockchain/transaction/validity.rs b/src/models/blockchain/transaction/validity.rs index 6928517f..1843171f 100644 --- a/src/models/blockchain/transaction/validity.rs +++ b/src/models/blockchain/transaction/validity.rs @@ -15,7 +15,6 @@ pub mod tasm; pub mod typescripts_halt; use crate::models::blockchain::transaction; use crate::util_types::mutator_set::mutator_set_accumulator::MutatorSetAccumulator; -use crate::util_types::mutator_set::mutator_set_trait::MutatorSet; use get_size::GetSize; use serde::{Deserialize, Serialize}; diff --git a/src/models/blockchain/transaction/validity/removal_records_integrity.rs b/src/models/blockchain/transaction/validity/removal_records_integrity.rs index 5e8b6c54..48c3f5ff 100644 --- a/src/models/blockchain/transaction/validity/removal_records_integrity.rs +++ b/src/models/blockchain/transaction/validity/removal_records_integrity.rs @@ -3,8 +3,8 @@ use crate::models::consensus::mast_hash::MastHash; use crate::models::consensus::tasm::program::ConsensusProgram; use crate::prelude::{triton_vm, twenty_first}; use crate::util_types::mutator_set::addition_record::AdditionRecord; -use crate::util_types::mutator_set::mutator_set_kernel::get_swbf_indices; -use crate::util_types::mutator_set::mutator_set_trait::commit; +use crate::util_types::mutator_set::commit; +use 
crate::util_types::mutator_set::get_swbf_indices; use crate::util_types::mutator_set::removal_record::{AbsoluteIndexSet, RemovalRecord}; use crate::twenty_first::util_types::mmr::shared_basic::leaf_index_to_mt_index_and_peak_index; @@ -67,17 +67,12 @@ impl RemovalRecordsIntegrityWitness { input_utxos: primitive_witness.input_utxos.utxos.clone(), membership_proofs: primitive_witness.input_membership_proofs.clone(), kernel: primitive_witness.kernel.clone(), - aocl: primitive_witness - .mutator_set_accumulator - .kernel - .aocl - .clone(), + aocl: primitive_witness.mutator_set_accumulator.aocl.clone(), swbfi: primitive_witness .mutator_set_accumulator - .kernel .swbf_inactive .clone(), - swbfa_hash: Hash::hash(&primitive_witness.mutator_set_accumulator.kernel.swbf_active), + swbfa_hash: Hash::hash(&primitive_witness.mutator_set_accumulator.swbf_active), } } } diff --git a/src/models/blockchain/transaction/validity/tasm/compute_canonical_commitment.rs b/src/models/blockchain/transaction/validity/tasm/compute_canonical_commitment.rs index 4ee18158..70c8765c 100644 --- a/src/models/blockchain/transaction/validity/tasm/compute_canonical_commitment.rs +++ b/src/models/blockchain/transaction/validity/tasm/compute_canonical_commitment.rs @@ -17,9 +17,7 @@ use tasm_lib::traits::function::{Function, FunctionInitialState}; use triton_vm::prelude::{triton_asm, BFieldElement, Digest}; use twenty_first::shared_math::bfield_codec::BFieldCodec; -use crate::util_types::mutator_set::{ - ms_membership_proof::MsMembershipProof, mutator_set_trait::commit, -}; +use crate::util_types::mutator_set::{commit, ms_membership_proof::MsMembershipProof}; /// Compute a canonical commitment from an item and its membership proof. #[derive(Debug, Clone)] diff --git a/src/models/blockchain/transaction/validity/tasm/compute_indices.rs b/src/models/blockchain/transaction/validity/tasm/compute_indices.rs index be64027b..30ece47f 100644 --- a/src/models/blockchain/transaction/validity/tasm/compute_indices.rs +++ b/src/models/blockchain/transaction/validity/tasm/compute_indices.rs @@ -254,7 +254,7 @@ mod tests { use triton_vm::prelude::NonDeterminism; use twenty_first::shared_math::bfield_codec::BFieldCodec; - use crate::util_types::mutator_set::mutator_set_kernel::get_swbf_indices; + use crate::util_types::mutator_set::get_swbf_indices; use super::*; diff --git a/src/models/blockchain/transaction/validity/tasm/removal_records_integrity.rs b/src/models/blockchain/transaction/validity/tasm/removal_records_integrity.rs index b940ad8d..e7879160 100644 --- a/src/models/blockchain/transaction/validity/tasm/removal_records_integrity.rs +++ b/src/models/blockchain/transaction/validity/tasm/removal_records_integrity.rs @@ -1,5 +1,7 @@ use crate::models::consensus::mast_hash::MastHash; use crate::prelude::{triton_vm, twenty_first}; +use crate::util_types::mutator_set::commit; +use crate::util_types::mutator_set::get_swbf_indices; use std::collections::HashSet; @@ -39,10 +41,7 @@ use crate::{ validity::tasm::transaction_kernel_mast_hash::TransactionKernelMastHash, }, }, - util_types::mutator_set::{ - mutator_set_kernel::get_swbf_indices, mutator_set_trait::commit, - removal_record::AbsoluteIndexSet, - }, + util_types::mutator_set::removal_record::AbsoluteIndexSet, }; use tasm_lib::memory::push_ram_to_stack::PushRamToStack; @@ -505,7 +504,7 @@ mod bench { let nondeterminism = NonDeterminism::default().with_ram(memory); bench_and_profile_program::( - "tasm_neptune_transaction_removal_records_integrity".to_string(), + 
"tasm_neptune_transaction_removal_records_integrity", BenchmarkCase::CommonCase, &public_input, &nondeterminism, diff --git a/src/models/blockchain/transaction/validity/tasm/transaction_kernel_mast_hash.rs b/src/models/blockchain/transaction/validity/tasm/transaction_kernel_mast_hash.rs index 903d78a2..f1f86ba6 100644 --- a/src/models/blockchain/transaction/validity/tasm/transaction_kernel_mast_hash.rs +++ b/src/models/blockchain/transaction/validity/tasm/transaction_kernel_mast_hash.rs @@ -75,16 +75,16 @@ impl BasicSnippet for TransactionKernelMastHash { fn code(&self, library: &mut Library) -> Vec { let entrypoint = self.entrypoint(); let new_list = library.import(Box::new(New { - data_type: DataType::Digest, + element_type: DataType::Digest, })); let get_element = library.import(Box::new(Get { element_type: DataType::Digest, })); let set_element = library.import(Box::new(Set { - data_type: DataType::Digest, + element_type: DataType::Digest, })); let set_length = library.import(Box::new(SetLength { - data_type: DataType::Digest, + element_type: DataType::Digest, })); let kernel_to_inputs_with_size = tasm_lib::field_with_size!(TransactionKernel::inputs); diff --git a/src/models/blockchain/transaction/validity/tasm/verify_aocl_membership.rs b/src/models/blockchain/transaction/validity/tasm/verify_aocl_membership.rs index bc8efa4e..6067ba4c 100644 --- a/src/models/blockchain/transaction/validity/tasm/verify_aocl_membership.rs +++ b/src/models/blockchain/transaction/validity/tasm/verify_aocl_membership.rs @@ -1,27 +1,16 @@ use crate::prelude::{triton_vm, twenty_first}; -use std::collections::HashMap; - -use crate::util_types::mutator_set::ms_membership_proof::pseudorandom_mutator_set_membership_proof; use crate::{ models::blockchain::shared::Hash, util_types::mutator_set::ms_membership_proof::MsMembershipProof, }; -use itertools::Itertools; -use rand::RngCore; -use rand::{rngs::StdRng, Rng, SeedableRng}; use tasm_lib::data_type::DataType; -use tasm_lib::empty_stack; use tasm_lib::library::Library; use tasm_lib::mmr::verify_from_memory::MmrVerifyFromMemory; use tasm_lib::traits::basic_snippet::BasicSnippet; -use tasm_lib::traits::function::{Function, FunctionInitialState}; -use triton_vm::{ - prelude::{BFieldElement, Digest}, - triton_asm, -}; -use twenty_first::shared_math::bfield_codec::BFieldCodec; -use twenty_first::test_shared::mmr::get_rustyleveldb_ammr_from_digests; + +use triton_vm::triton_asm; + use twenty_first::util_types::mmr::mmr_membership_proof::MmrMembershipProof; /// Given a membership proof and a canonical commitment, verify membership in the AOCL. 
@@ -116,135 +105,165 @@ impl BasicSnippet for VerifyAoclMembership { } } -impl Function for VerifyAoclMembership { - fn rust_shadow( - &self, - stack: &mut Vec, - memory: &mut std::collections::HashMap, - ) { - // read arguments from stack - // *peaks leaf_count_hi leaf_count_lo [bu ff er] *msmp c4 c3 c2 c1 c0 - let c0 = stack.pop().unwrap(); - let c1 = stack.pop().unwrap(); - let c2 = stack.pop().unwrap(); - let c3 = stack.pop().unwrap(); - let c4 = stack.pop().unwrap(); - let leaf = Digest::new([c0, c1, c2, c3, c4]); - let mp_ptr = stack.pop().unwrap(); - let _er = stack.pop().unwrap(); - let _ff = stack.pop().unwrap(); - let _bu = stack.pop().unwrap(); - let leaf_count_lo = stack.pop().unwrap().value(); - let leaf_count_hi = stack.pop().unwrap().value(); - let leaf_count = (leaf_count_hi << 32) ^ leaf_count_lo; - let peaks_ptr = stack.pop().unwrap(); - - // read peaks list - let peaks_size = memory - .get(&(peaks_ptr - BFieldElement::new(1))) - .unwrap() - .value(); - println!("peaks_size: {peaks_size}"); - let mut peaks_list_encoding = vec![]; - for i in 0..peaks_size { - peaks_list_encoding.push(*memory.get(&(peaks_ptr + BFieldElement::new(i))).unwrap()); - } - let peaks = *Vec::::decode(&peaks_list_encoding).unwrap(); - println!("peaks: {}", peaks.iter().join(",")); - - // read authentication path - let mp_size = memory - .get(&(mp_ptr - BFieldElement::new(1))) - .unwrap() - .value(); - println!("mp_size: {mp_size}"); - let mut mp_encoding = vec![]; - for i in 0..mp_size { - mp_encoding.push(*memory.get(&(mp_ptr + BFieldElement::new(i))).unwrap()); - } - let memproof = *MsMembershipProof::decode(&mp_encoding).unwrap(); - println!("memproof li: {}", memproof.auth_path_aocl.leaf_index); - println!( - "memproof ap: {}", - memproof.auth_path_aocl.authentication_path.iter().join(",") - ); - - // verify - let validation_result = memproof.auth_path_aocl.verify(&peaks, leaf, leaf_count).0; - println!("RS validation_result: {validation_result}"); - - // repopulate stack - // _ *peaks leaf_count_hi leaf_count_lo [bu ff er] validation_result - stack.push(peaks_ptr); - stack.push(BFieldElement::new(leaf_count_hi)); - stack.push(BFieldElement::new(leaf_count_lo)); - stack.push(_bu); - stack.push(_ff); - stack.push(_er); - stack.push(BFieldElement::new(validation_result as u64)); - } - - fn pseudorandom_initial_state( - &self, - seed: [u8; 32], - _bench_case: Option, - ) -> FunctionInitialState { - let mut rng: StdRng = SeedableRng::from_seed(seed); - let num_leafs = rng.gen_range(1..100); - let leafs = (0..num_leafs).map(|_| rng.gen::()).collect_vec(); - let mmr = get_rustyleveldb_ammr_from_digests::(leafs); - - let leaf_index = rng.next_u64() % num_leafs; - let leaf = mmr.get_leaf(leaf_index); - let (mmr_mp, peaks) = mmr.prove_membership(leaf_index); - let mut msmp = pseudorandom_mutator_set_membership_proof(rng.gen()); - msmp.auth_path_aocl = mmr_mp; - - // populate memory - let mut memory: HashMap = HashMap::new(); - let mut address = BFieldElement::new(rng.next_u64() % (1 << 20)); - - let peaks_si_ptr = address; - memory.insert(address, BFieldElement::new(peaks.encode().len() as u64)); - address.increment(); - for v in peaks.encode().iter() { - memory.insert(address, *v); - address.increment(); - } - - let msmp_si_ptr = address; - memory.insert(msmp_si_ptr, BFieldElement::new(msmp.encode().len() as u64)); - address.increment(); - for v in msmp.encode().iter() { - memory.insert(address, *v); - address.increment(); - } - - // populate stack - // *peaks leaf_count_hi leaf_count_lo [bu ff er] 
*msmp c4 c3 c2 c1 c0 - let mut stack = empty_stack(); - stack.push(peaks_si_ptr + BFieldElement::new(1)); - stack.push(BFieldElement::new(num_leafs >> 32)); - stack.push(BFieldElement::new(num_leafs & u32::MAX as u64)); - stack.push(rng.gen()); - stack.push(rng.gen()); - stack.push(rng.gen()); - stack.push(msmp_si_ptr + BFieldElement::new(1)); - stack.push(leaf.values()[4]); - stack.push(leaf.values()[3]); - stack.push(leaf.values()[2]); - stack.push(leaf.values()[1]); - stack.push(leaf.values()[0]); - - FunctionInitialState { stack, memory } - } -} - #[cfg(test)] mod tests { use super::*; + + use crate::util_types::mutator_set::ms_membership_proof::pseudorandom_mutator_set_membership_proof; + + use rand::RngCore; + use rand::{rngs::StdRng, Rng, SeedableRng}; + use tasm_lib::empty_stack; use tasm_lib::traits::function::ShadowedFunction; + use tasm_lib::traits::function::{Function, FunctionInitialState}; use tasm_lib::traits::rust_shadow::RustShadow; + use tasm_lib::triton_vm::prelude::BFieldCodec; + + use itertools::Itertools; + + use std::collections::HashMap; + + use triton_vm::prelude::{BFieldElement, Digest}; + + use crate::util_types::mutator_set::archival_mmr::mmr_test::mock; + + impl Function for VerifyAoclMembership { + fn rust_shadow( + &self, + stack: &mut Vec, + memory: &mut std::collections::HashMap, + ) { + // read arguments from stack + // *peaks leaf_count_hi leaf_count_lo [bu ff er] *msmp c4 c3 c2 c1 c0 + let c0 = stack.pop().unwrap(); + let c1 = stack.pop().unwrap(); + let c2 = stack.pop().unwrap(); + let c3 = stack.pop().unwrap(); + let c4 = stack.pop().unwrap(); + let leaf = Digest::new([c0, c1, c2, c3, c4]); + let mp_ptr = stack.pop().unwrap(); + let _er = stack.pop().unwrap(); + let _ff = stack.pop().unwrap(); + let _bu = stack.pop().unwrap(); + let leaf_count_lo = stack.pop().unwrap().value(); + let leaf_count_hi = stack.pop().unwrap().value(); + let leaf_count = (leaf_count_hi << 32) ^ leaf_count_lo; + let peaks_ptr = stack.pop().unwrap(); + + // read peaks list + let peaks_size = memory + .get(&(peaks_ptr - BFieldElement::new(1))) + .unwrap() + .value(); + println!("peaks_size: {peaks_size}"); + let mut peaks_list_encoding = vec![]; + for i in 0..peaks_size { + peaks_list_encoding + .push(*memory.get(&(peaks_ptr + BFieldElement::new(i))).unwrap()); + } + let peaks = *Vec::::decode(&peaks_list_encoding).unwrap(); + println!("peaks: {}", peaks.iter().join(",")); + + // read authentication path + let mp_size = memory + .get(&(mp_ptr - BFieldElement::new(1))) + .unwrap() + .value(); + println!("mp_size: {mp_size}"); + let mut mp_encoding = vec![]; + for i in 0..mp_size { + mp_encoding.push(*memory.get(&(mp_ptr + BFieldElement::new(i))).unwrap()); + } + let memproof = *MsMembershipProof::decode(&mp_encoding).unwrap(); + println!("memproof li: {}", memproof.auth_path_aocl.leaf_index); + println!( + "memproof ap: {}", + memproof.auth_path_aocl.authentication_path.iter().join(",") + ); + + // verify + let validation_result = memproof.auth_path_aocl.verify(&peaks, leaf, leaf_count).0; + println!("RS validation_result: {validation_result}"); + + // repopulate stack + // _ *peaks leaf_count_hi leaf_count_lo [bu ff er] validation_result + stack.push(peaks_ptr); + stack.push(BFieldElement::new(leaf_count_hi)); + stack.push(BFieldElement::new(leaf_count_lo)); + stack.push(_bu); + stack.push(_ff); + stack.push(_er); + stack.push(BFieldElement::new(validation_result as u64)); + } + + fn pseudorandom_initial_state( + &self, + seed: [u8; 32], + _bench_case: Option, + ) -> 
FunctionInitialState { + async fn pseudorandom_initial_state_async(seed: [u8; 32]) -> FunctionInitialState { + let mut rng: StdRng = SeedableRng::from_seed(seed); + let num_leafs = rng.gen_range(1..100); + let leafs = (0..num_leafs).map(|_| rng.gen::()).collect_vec(); + + let mmr = mock::get_ammr_from_digests::(leafs).await; + + let leaf_index = rng.next_u64() % num_leafs; + let leaf = mmr.get_leaf_async(leaf_index).await; + let peaks = mmr.get_peaks().await; + let mmr_mp = mmr.prove_membership_async(leaf_index).await; + let mut msmp = pseudorandom_mutator_set_membership_proof(rng.gen()); + msmp.auth_path_aocl = mmr_mp; + + // populate memory + let mut memory: HashMap = HashMap::new(); + let mut address = BFieldElement::new(rng.next_u64() % (1 << 20)); + + let peaks_si_ptr = address; + memory.insert(address, BFieldElement::new(peaks.encode().len() as u64)); + address.increment(); + for v in peaks.encode().iter() { + memory.insert(address, *v); + address.increment(); + } + + let msmp_si_ptr = address; + memory.insert(msmp_si_ptr, BFieldElement::new(msmp.encode().len() as u64)); + address.increment(); + for v in msmp.encode().iter() { + memory.insert(address, *v); + address.increment(); + } + + // populate stack + // *peaks leaf_count_hi leaf_count_lo [bu ff er] *msmp c4 c3 c2 c1 c0 + let mut stack = empty_stack(); + stack.push(peaks_si_ptr + BFieldElement::new(1)); + stack.push(BFieldElement::new(num_leafs >> 32)); + stack.push(BFieldElement::new(num_leafs & u32::MAX as u64)); + stack.push(rng.gen()); + stack.push(rng.gen()); + stack.push(rng.gen()); + stack.push(msmp_si_ptr + BFieldElement::new(1)); + stack.push(leaf.values()[4]); + stack.push(leaf.values()[3]); + stack.push(leaf.values()[2]); + stack.push(leaf.values()[1]); + stack.push(leaf.values()[0]); + + FunctionInitialState { stack, memory } + } + + std::thread::scope(|s| { + s.spawn(|| { + let runtime = tokio::runtime::Runtime::new().unwrap(); + runtime.block_on(pseudorandom_initial_state_async(seed)) + }) + .join() + .unwrap() + }) + } + } #[test] fn test_verify_aocl_membership() { diff --git a/src/models/blockchain/type_scripts/native_currency.rs b/src/models/blockchain/type_scripts/native_currency.rs index 3c2b2ff4..37b19d1f 100644 --- a/src/models/blockchain/type_scripts/native_currency.rs +++ b/src/models/blockchain/type_scripts/native_currency.rs @@ -282,7 +282,7 @@ pub mod test { use super::*; - #[proptest] + #[proptest(cases = 20)] fn balanced_transaction_is_valid( #[strategy(1usize..=3)] _num_inputs: usize, #[strategy(1usize..=3)] _num_outputs: usize, @@ -303,7 +303,7 @@ pub mod test { ); } - #[proptest] + #[proptest(cases = 20)] fn unbalanced_transaction_without_coinbase_is_invalid( #[strategy(1usize..=3)] _num_inputs: usize, #[strategy(1usize..=3)] _num_outputs: usize, @@ -333,7 +333,7 @@ pub mod test { ); } - #[proptest] + #[proptest(cases = 20)] fn unbalanced_transaction_with_coinbase_is_invalid( #[strategy(1usize..=3)] _num_inputs: usize, #[strategy(1usize..=3)] _num_outputs: usize, diff --git a/src/models/blockchain/type_scripts/neptune_coins.rs b/src/models/blockchain/type_scripts/neptune_coins.rs index 41f9ac15..95591419 100644 --- a/src/models/blockchain/type_scripts/neptune_coins.rs +++ b/src/models/blockchain/type_scripts/neptune_coins.rs @@ -120,9 +120,7 @@ impl NeptuneCoins { // flip and recurse if we are dealing with negative numbers if sign == num_bigint::Sign::Minus { - let Some(positive_nau) = Self::from_nau(-nau) else { - return None; - }; + let positive_nau = Self::from_nau(-nau)?; return 
diff --git a/src/models/blockchain/type_scripts/native_currency.rs b/src/models/blockchain/type_scripts/native_currency.rs
index 3c2b2ff4..37b19d1f 100644
--- a/src/models/blockchain/type_scripts/native_currency.rs
+++ b/src/models/blockchain/type_scripts/native_currency.rs
@@ -282,7 +282,7 @@ pub mod test {

     use super::*;

-    #[proptest]
+    #[proptest(cases = 20)]
     fn balanced_transaction_is_valid(
         #[strategy(1usize..=3)] _num_inputs: usize,
         #[strategy(1usize..=3)] _num_outputs: usize,
@@ -303,7 +303,7 @@ pub mod test {
         );
     }

-    #[proptest]
+    #[proptest(cases = 20)]
     fn unbalanced_transaction_without_coinbase_is_invalid(
         #[strategy(1usize..=3)] _num_inputs: usize,
         #[strategy(1usize..=3)] _num_outputs: usize,
@@ -333,7 +333,7 @@ pub mod test {
         );
     }

-    #[proptest]
+    #[proptest(cases = 20)]
     fn unbalanced_transaction_with_coinbase_is_invalid(
         #[strategy(1usize..=3)] _num_inputs: usize,
         #[strategy(1usize..=3)] _num_outputs: usize,
diff --git a/src/models/blockchain/type_scripts/neptune_coins.rs b/src/models/blockchain/type_scripts/neptune_coins.rs
index 41f9ac15..95591419 100644
--- a/src/models/blockchain/type_scripts/neptune_coins.rs
+++ b/src/models/blockchain/type_scripts/neptune_coins.rs
@@ -120,9 +120,7 @@ impl NeptuneCoins {

         // flip and recurse if we are dealing with negative numbers
         if sign == num_bigint::Sign::Minus {
-            let Some(positive_nau) = Self::from_nau(-nau) else {
-                return None;
-            };
+            let positive_nau = Self::from_nau(-nau)?;
             return Some(Self(u128::MAX - positive_nau.0 + 1u128));
         }
diff --git a/src/models/blockchain/type_scripts/time_lock.rs b/src/models/blockchain/type_scripts/time_lock.rs
index be25fa8f..897f5e3f 100644
--- a/src/models/blockchain/type_scripts/time_lock.rs
+++ b/src/models/blockchain/type_scripts/time_lock.rs
@@ -587,7 +587,7 @@ mod test {

     use super::TimeLockWitness;

-    #[proptest]
+    #[proptest(cases = 20)]
     fn test_unlocked(
         #[strategy(1usize..=3)] _num_inputs: usize,
         #[strategy(1usize..=3)] _num_outputs: usize,
@@ -607,7 +607,7 @@ mod test {
         );
     }

-    #[proptest]
+    #[proptest(cases = 20)]
     fn test_locked(
         #[strategy(1usize..=3)] _num_inputs: usize,
         #[strategy(1usize..=3)] _num_outputs: usize,
@@ -629,7 +629,7 @@ mod test {
         );
     }

-    #[proptest]
+    #[proptest(cases = 20)]
     fn test_released(
         #[strategy(1usize..=3)] _num_inputs: usize,
         #[strategy(1usize..=3)] _num_outputs: usize,
diff --git a/src/models/consensus/tasm/environment.rs b/src/models/consensus/tasm/environment.rs
index a3bd52a9..6a4ec910 100644
--- a/src/models/consensus/tasm/environment.rs
+++ b/src/models/consensus/tasm/environment.rs
@@ -13,11 +13,11 @@ use tasm_lib::{
 };

 thread_local! {
-    pub(super) static PUB_INPUT: RefCell<Vec<BFieldElement>> = RefCell::new(vec![]);
-    pub(super) static PUB_OUTPUT: RefCell<Vec<BFieldElement>> = RefCell::new(vec![]);
+    pub(super) static PUB_INPUT: RefCell<Vec<BFieldElement>> = const { RefCell::new(vec![]) };
+    pub(super) static PUB_OUTPUT: RefCell<Vec<BFieldElement>> = const { RefCell::new(vec![]) };

-    pub(super) static ND_INDIVIDUAL_TOKEN: RefCell<Vec<BFieldElement>> = RefCell::new(vec![]);
-    pub(super) static ND_DIGESTS: RefCell<Vec<Digest>> = RefCell::new(vec![]);
+    pub(super) static ND_INDIVIDUAL_TOKEN: RefCell<Vec<BFieldElement>> = const { RefCell::new(vec![]) };
+    pub(super) static ND_DIGESTS: RefCell<Vec<Digest>> = const { RefCell::new(vec![]) };

     pub(super) static ND_MEMORY: RefCell<HashMap<BFieldElement, BFieldElement>> =
         RefCell::new(HashMap::default());
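The `thread_local!` change above opts into `const { ... }` initializers, which let the compiler skip the runtime lazy-initialization check that a non-const initializer requires on each access. A small standalone sketch of the same idiom:

```rust
use std::cell::RefCell;

thread_local! {
    // A `const { ... }` initializer avoids the lazy-init branch that a plain
    // `RefCell::new(Vec::new())` initializer would incur on every access.
    static BUFFER: RefCell<Vec<u64>> = const { RefCell::new(Vec::new()) };
}

fn main() {
    BUFFER.with(|b| b.borrow_mut().push(42));
    BUFFER.with(|b| assert_eq!(b.borrow().len(), 1));
}
```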
diff --git a/src/models/state/archival_state.rs b/src/models/state/archival_state.rs
index ac52036b..3e2dd353 100644
--- a/src/models/state/archival_state.rs
+++ b/src/models/state/archival_state.rs
@@ -1,6 +1,7 @@
 use crate::config_models::network::Network;
 use crate::prelude::twenty_first;

+use crate::database::storage::storage_schema::traits::*;
 use anyhow::Result;
 use memmap2::MmapOptions;
 use num_traits::Zero;
@@ -13,20 +14,16 @@ use tokio::io::SeekFrom;
 use tracing::{debug, warn};
 use twenty_first::amount::u32s::U32s;
 use twenty_first::shared_math::digest::Digest;
-use twenty_first::storage::level_db::DB;
-use twenty_first::util_types::mmr::mmr_trait::Mmr;
-use twenty_first::util_types::storage_schema::traits::*;

 use super::shared::new_block_file_is_needed;
 use crate::config_models::data_directory::DataDirectory;
-use crate::database::{create_db_if_missing, NeptuneLevelDb};
+use crate::database::{create_db_if_missing, NeptuneLevelDb, WriteBatchAsync};
 use crate::models::blockchain::block::block_header::{BlockHeader, PROOF_OF_WORK_COUNT_U32_SIZE};
 use crate::models::blockchain::block::{block_height::BlockHeight, Block};
 use crate::models::database::{
     BlockFileLocation, BlockIndexKey, BlockIndexValue, BlockRecord, FileRecord, LastFileRecord,
 };
 use crate::util_types::mutator_set::addition_record::AdditionRecord;
-use crate::util_types::mutator_set::mutator_set_trait::MutatorSet;
 use crate::util_types::mutator_set::removal_record::RemovalRecord;
 use crate::util_types::mutator_set::rusty_archival_mutator_set::RustyArchivalMutatorSet;

@@ -101,8 +98,7 @@ impl ArchivalState {
         DataDirectory::create_dir_if_not_exists(&ms_db_dir_path).await?;

         let path = ms_db_dir_path.clone();
-        let result =
-            tokio::task::spawn_blocking(move || DB::open(&path, &create_db_if_missing())).await?;
+        let result = NeptuneLevelDb::new(&path, &create_db_if_missing()).await;

         let db = match result {
             Ok(db) => db,
@@ -119,8 +115,8 @@ impl ArchivalState {
             }
         };

-        let mut archival_set = RustyArchivalMutatorSet::connect(db);
-        archival_set.restore_or_new();
+        let mut archival_set = RustyArchivalMutatorSet::connect(db).await;
+        archival_set.restore_or_new().await;

         Ok(archival_set)
     }
@@ -201,13 +197,13 @@ impl ArchivalState {
         // We could have populated the archival mutator set with the genesis block UTXOs earlier in
         // the setup, but we don't have the genesis block in scope before this function, so it makes
         // sense to do it here.
-        if archival_mutator_set.ams().kernel.aocl.is_empty() {
+        if archival_mutator_set.ams().aocl.is_empty().await {
             for addition_record in genesis_block.kernel.body.transaction.kernel.outputs.iter() {
-                archival_mutator_set.ams_mut().add(addition_record);
+                archival_mutator_set.ams_mut().add(addition_record).await;
             }
             let genesis_hash = genesis_block.hash();
-            archival_mutator_set.set_sync_label(genesis_hash);
-            archival_mutator_set.persist();
+            archival_mutator_set.set_sync_label(genesis_hash).await;
+            archival_mutator_set.persist().await;
         }

         Self {
@@ -345,7 +341,12 @@ impl ArchivalState {
             ));
         }

-        self.block_index_db.batch_write(block_index_entries).await;
+        let mut batch = WriteBatchAsync::new();
+        for (k, v) in block_index_entries.into_iter() {
+            batch.op_write(k, v);
+        }
+
+        self.block_index_db.batch_write(batch).await;

         Ok(())
     }
@@ -645,7 +646,7 @@ impl ArchivalState {
     pub async fn update_mutator_set(&mut self, new_block: &Block) -> Result<()> {
         let (forwards, backwards) = {
             // Get the block digest that the mutator set was most recently synced to
-            let ms_block_sync_digest = self.archival_mutator_set.get_sync_label();
+            let ms_block_sync_digest = self.archival_mutator_set.get_sync_label().await;

             // Find path from mutator set sync digest to new block. Optimize for the common case,
             // where the new block is the child block of block that the mutator set is synced to.
@@ -692,19 +693,22 @@ impl ArchivalState {
                 assert!(
                     self.archival_mutator_set
                         .ams_mut()
-                        .add_is_reversible(addition_record),
+                        .add_is_reversible(addition_record)
+                        .await,
                     "Addition record must be in sync with block being rolled back."
                 );
                 self.archival_mutator_set
                     .ams_mut()
-                    .revert_add(addition_record);
+                    .revert_add(addition_record)
+                    .await;
             }

             // Roll back all removal records contained in block
             for removal_record in roll_back_block.kernel.body.transaction.kernel.inputs.iter() {
                 self.archival_mutator_set
                     .ams_mut()
-                    .revert_remove(removal_record);
+                    .revert_remove(removal_record)
+                    .await;
             }
         }
@@ -752,11 +756,14 @@ impl ArchivalState {
                 // Batch-update all removal records to keep them valid after next addition
                 RemovalRecord::batch_update_from_addition(
                     &mut removal_records,
-                    &mut self.archival_mutator_set.ams_mut().kernel,
+                    &self.archival_mutator_set.ams().accumulator().await,
                 );

                 // Add the element to the mutator set
-                self.archival_mutator_set.ams_mut().add(&addition_record);
+                self.archival_mutator_set
+                    .ams_mut()
+                    .add(&addition_record)
+                    .await;
             }

             // Remove items, thus removing the input UTXOs from the mutator set
@@ -765,7 +772,10 @@ impl ArchivalState {
                 RemovalRecord::batch_update_from_remove(&mut removal_records, removal_record);

                 // Remove the element from the mutator set
-                self.archival_mutator_set.ams_mut().remove(removal_record);
+                self.archival_mutator_set
+                    .ams_mut()
+                    .remove(removal_record)
+                    .await;
             }
         }

@@ -776,13 +786,15 @@ impl ArchivalState {
                 .kernel
                .body
                .mutator_set_accumulator
                .hash(),
-            self.archival_mutator_set.ams().hash(),
+            self.archival_mutator_set.ams().hash().await,
            "Calculated archival mutator set commitment must match that from newly added block. Block Digest: {:?}", new_block.hash()
        );

         // Persist updated mutator set to disk, with sync label
-        self.archival_mutator_set.set_sync_label(new_block.hash());
-        self.archival_mutator_set.persist();
+        self.archival_mutator_set
+            .set_sync_label(new_block.hash())
+            .await;
+        self.archival_mutator_set.persist().await;

         Ok(())
     }
@@ -794,6 +806,7 @@ mod archival_state_tests {
     use super::*;

     use crate::config_models::network::Network;
+    use crate::database::storage::storage_vec::traits::*;
     use crate::models::blockchain::transaction::utxo::LockScript;
     use crate::models::blockchain::transaction::utxo::Utxo;
     use crate::models::blockchain::transaction::PublicAnnouncement;
@@ -814,7 +827,6 @@ mod archival_state_tests {
     use rand::SeedableRng;
     use rand::{random, thread_rng, RngCore};
     use tracing_test::traced_test;
-    use twenty_first::util_types::storage_vec::traits::*;

     async fn make_test_archival_state(network: Network) -> ArchivalState {
         let (block_index_db, _peer_db_lock, data_dir) = unit_test_databases(network).await.unwrap();
@@ -831,29 +843,26 @@ mod archival_state_tests {
     async fn initialize_archival_state_test() -> Result<()> {
         // Ensure that the archival state can be initialized without overflowing the stack
         let seed: [u8; 32] = thread_rng().gen();
-        tokio::spawn(async move {
-            let mut rng: StdRng = SeedableRng::from_seed(seed);
-            let network = Network::RegTest;
+        let mut rng: StdRng = SeedableRng::from_seed(seed);
+        let network = Network::RegTest;

-            let mut archival_state0 = make_test_archival_state(network).await;
+        let mut archival_state0 = make_test_archival_state(network).await;

-            let b = Block::genesis_block(network);
-            let some_wallet_secret = WalletSecret::new_random();
-            let some_spending_key = some_wallet_secret.nth_generation_spending_key(0);
-            let some_receiving_address = some_spending_key.to_address();
+        let b = Block::genesis_block(network);
+        let some_wallet_secret = WalletSecret::new_random();
+        let some_spending_key = some_wallet_secret.nth_generation_spending_key(0);
+        let some_receiving_address = some_spending_key.to_address();

-            let (block_1, _, _) =
-                make_mock_block_with_valid_pow(&b, None, some_receiving_address, rng.gen());
-            add_block_to_archival_state(&mut archival_state0, block_1.clone())
-                .await
-                .unwrap();
-            let _c = archival_state0
-                .get_block(block_1.hash())
-                .await
-                .unwrap()
-                .unwrap();
-        })
-        .await?;
+        let (block_1, _, _) =
+            make_mock_block_with_valid_pow(&b, None, some_receiving_address, rng.gen());
+        add_block_to_archival_state(&mut archival_state0, block_1.clone())
+            .await
+            .unwrap();
+        let _c = archival_state0
+            .get_block(block_1.hash())
+            .await
+            .unwrap()
+            .unwrap();

         Ok(())
     }
@@ -876,15 +885,15 @@ mod archival_state_tests {
             archival_state
                 .archival_mutator_set
                 .ams()
-                .kernel
                 .aocl
-                .count_leaves(),
+                .count_leaves()
+                .await,
             "Archival mutator set must be populated with premine outputs"
         );

         assert_eq!(
             Block::genesis_block(network).hash(),
-            archival_state.archival_mutator_set.get_sync_label(),
+            archival_state.archival_mutator_set.get_sync_label().await,
             "AMS must be synced to genesis block after initialization from genesis block"
         );
@@ -921,7 +930,8 @@ mod archival_state_tests {
             mock_block_1.hash(),
             restored_archival_state
                 .archival_mutator_set
-                .get_sync_label(),
+                .get_sync_label()
+                .await,
             "sync_label of restored archival mutator set must be digest of latest block"
         );
@@ -979,7 +989,7 @@ mod archival_state_tests {
                 .chain
                 .archival_state()
                 .archival_mutator_set;
-            assert_ne!(0, ams_ref.ams().kernel.aocl.count_leaves());
+            assert_ne!(0, ams_ref.ams().aocl.count_leaves().await);
         }

         let now = mock_block_1.kernel.header.timestamp;
@@ -1010,10 +1020,12 @@ mod archival_state_tests {
         )
         .await
         .unwrap();
-        mock_block_2.accumulate_transaction(
-            sender_tx,
-            &mock_block_1.kernel.body.mutator_set_accumulator,
-        );
+        mock_block_2
+            .accumulate_transaction(
+                sender_tx,
+                &mock_block_1.kernel.body.mutator_set_accumulator,
+            )
+            .await;

         // Remove an element from the mutator set, verify that the active window DB is updated.
         add_block(&mut genesis_receiver_global_state, mock_block_2.clone()).await?;
@@ -1028,7 +1040,7 @@ mod archival_state_tests {
                 .chain
                 .archival_state()
                 .archival_mutator_set;
-            assert_ne!(0, ams_ref.ams().kernel.swbf_active.sbf.len());
+            assert_ne!(0, ams_ref.ams().swbf_active.sbf.len());
         }

         Ok(())
@@ -1145,14 +1157,16 @@ mod archival_state_tests {
             .await
             .unwrap();

-        block_1a.accumulate_transaction(
-            sender_tx,
-            &archival_state
-                .genesis_block
-                .kernel
-                .body
-                .mutator_set_accumulator,
-        );
+        block_1a
+            .accumulate_transaction(
+                sender_tx,
+                &archival_state
+                    .genesis_block
+                    .kernel
+                    .body
+                    .mutator_set_accumulator,
+            )
+            .await;

         assert!(block_1a.is_valid(&genesis_block, now + seven_months));
@@ -1197,7 +1211,6 @@ mod archival_state_tests {
             archival_state
                 .archival_mutator_set
                 .ams()
-                .kernel
                 .swbf_active
                 .sbf
                 .is_empty(),
@@ -1209,9 +1222,9 @@ mod archival_state_tests {
             archival_state
                 .archival_mutator_set
                 .ams()
-                .kernel
                 .aocl
-                .count_leaves() as usize,
+                .count_leaves()
+                .await as usize,
             "AOCL leaf count must agree with blockchain after rollback"
         );
     }
@@ -1276,10 +1289,12 @@ mod archival_state_tests {
             .await
             .unwrap();

-            next_block.accumulate_transaction(
-                sender_tx,
-                &previous_block.kernel.body.mutator_set_accumulator,
-            );
+            next_block
+                .accumulate_transaction(
+                    sender_tx,
+                    &previous_block.kernel.body.mutator_set_accumulator,
+                )
+                .await;

             assert!(
                 next_block.is_valid(&previous_block, now + seven_months),
@@ -1369,7 +1384,6 @@ mod archival_state_tests {
                 .archival_state()
                 .archival_mutator_set
                 .ams()
-                .kernel
                 .swbf_active
                 .sbf
                 .is_empty(),
@@ -1383,9 +1397,8 @@ mod archival_state_tests {
                 .archival_state()
                 .archival_mutator_set
                 .ams()
-                .kernel
                 .aocl
-                .count_leaves() as usize,
+                .count_leaves().await as usize,
             "AOCL leaf count must agree with #premine allocations + #transaction outputs in all blocks, even after rollback"
         );
@@ -1430,10 +1443,12 @@ mod archival_state_tests {
             .await
             .unwrap();

-        block_1_a.accumulate_transaction(
-            sender_tx,
-            &genesis_block.kernel.body.mutator_set_accumulator,
-        );
+        block_1_a
+            .accumulate_transaction(
+                sender_tx,
+                &genesis_block.kernel.body.mutator_set_accumulator,
+            )
+            .await;

         // Block with signed transaction must validate
         assert!(block_1_a.is_valid(&genesis_block, now + seven_months));
@@ -1533,10 +1548,12 @@ mod archival_state_tests {
             .unwrap();

         // Absorb and verify validity
-        block_1.accumulate_transaction(
-            tx_to_alice_and_bob,
-            &genesis_block.kernel.body.mutator_set_accumulator,
-        );
+        block_1
+            .accumulate_transaction(
+                tx_to_alice_and_bob,
+                &genesis_block.kernel.body.mutator_set_accumulator,
+            )
+            .await;

         assert!(block_1.is_valid(&genesis_block, launch + seven_months));
     }
@@ -1586,7 +1603,7 @@ mod archival_state_tests {
                 .wallet_state
                 .wallet_db
                 .monitored_utxos()
-                .len(), "Genesis receiver must have 3 UTXOs after block 1: change from transaction, coinbase from block 1, and the spent premine UTXO"
+                .len().await, "Genesis receiver must have 3 UTXOs after block 1: change from transaction, coinbase from block 1, and the spent premine UTXO"
         );
     }
@@ -1738,11 +1755,15 @@ mod archival_state_tests {
             genesis_spending_key.to_address(),
             rng.gen(),
         );
-        block_2.accumulate_transaction(tx_from_alice, &block_1.kernel.body.mutator_set_accumulator);
+        block_2
+            .accumulate_transaction(tx_from_alice, &block_1.kernel.body.mutator_set_accumulator)
+            .await;
         assert_eq!(2, block_2.kernel.body.transaction.kernel.inputs.len());
         assert_eq!(3, block_2.kernel.body.transaction.kernel.outputs.len());

-        block_2.accumulate_transaction(tx_from_bob, &block_1.kernel.body.mutator_set_accumulator);
+        block_2
+            .accumulate_transaction(tx_from_bob, &block_1.kernel.body.mutator_set_accumulator)
+            .await;

         // Sanity checks
         assert_eq!(4, block_2.kernel.body.transaction.kernel.inputs.len());
@@ -1858,7 +1879,7 @@ mod archival_state_tests {
                 .wallet_state
                 .wallet_db
                 .monitored_utxos()
-                .len(), "Genesis receiver must have 9 UTXOs after block 2: 3 after block 1, and 6 added by block 2"
+                .len().await, "Genesis receiver must have 9 UTXOs after block 2: 3 after block 1, and 6 added by block 2"
         );

         // Verify that mutator sets are updated correctly and that last block is block 2
@@ -1872,7 +1893,8 @@ mod archival_state_tests {
                 .archival_state()
                 .archival_mutator_set
                 .ams()
-                .accumulator(),
+                .accumulator()
+                .await,
             "AMS must be correctly updated"
         );
         assert_eq!(
@@ -3034,6 +3056,7 @@ mod archival_state_tests {
     async fn can_initialize_mutator_set_database() {
         let args: cli_args::Args = cli_args::Args::default();
         let data_dir = DataDirectory::get(args.data_dir.clone(), args.network).unwrap();
+        println!("data_dir for MS initialization test: {data_dir}");
         let _rams = ArchivalState::initialize_mutator_set(&data_dir)
             .await
             .unwrap();
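The archival-state changes above also replace the ad-hoc `Vec<(key, value)>` batch argument with an explicit `WriteBatchAsync` that is filled via `op_write` and committed with a single `batch_write(batch).await`. A rough sketch of that queue-then-commit shape against a toy in-memory store (the `WriteBatch` type and `commit` function below are hypothetical, for illustration only):

```rust
// Sketch of the batched-write pattern, assuming a hypothetical async KV API.
struct WriteBatch<K, V> {
    ops: Vec<(K, V)>,
}

impl<K, V> WriteBatch<K, V> {
    fn new() -> Self {
        Self { ops: Vec::new() }
    }
    // Queue a write; nothing hits the store until the batch is committed.
    fn op_write(&mut self, key: K, value: V) {
        self.ops.push((key, value));
    }
}

// Stand-in for an atomic LevelDB write: all queued ops land together.
async fn commit<K, V>(batch: WriteBatch<K, V>, store: &mut Vec<(K, V)>) {
    store.extend(batch.ops);
}

#[tokio::main]
async fn main() {
    let mut store = Vec::new();
    let mut batch = WriteBatch::new();
    for (k, v) in [(1u8, "a"), (2, "b")] {
        batch.op_write(k, v);
    }
    commit(batch, &mut store).await;
    assert_eq!(store.len(), 2);
}
```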
diff --git a/src/models/state/mempool.rs b/src/models/state/mempool.rs
index 17f52480..f5fb3663 100644
--- a/src/models/state/mempool.rs
+++ b/src/models/state/mempool.rs
@@ -285,7 +285,7 @@ impl Mempool {
     /// Remove from the mempool all transactions that become invalid because
     /// of this newly mined block. Also update all mutator set data for monitored
     /// transactions that were not removed in the previous step.
-    pub fn update_with_block(
+    pub async fn update_with_block(
         &mut self,
         previous_mutator_set_accumulator: MutatorSetAccumulator,
         block: &Block,
@@ -406,7 +406,6 @@ mod tests {
             get_mock_global_state, get_mock_wallet_state, make_mock_block,
             make_mock_transaction_with_wallet,
         },
-        util_types::mutator_set::mutator_set_trait::MutatorSet,
     };
     use anyhow::Result;
     use itertools::Itertools;
@@ -538,18 +537,21 @@ mod tests {
     #[traced_test]
     #[tokio::test]
     async fn remove_transactions_with_block_test() -> Result<()> {
-        let mut rng: StdRng =
-            SeedableRng::from_rng(thread_rng()).expect("failure lifting thread_rng to StdRng");
-        let seed: [u8; 32] = rng.gen();
-        // let seed = [
-        //     0x19, 0xba, 0xc1, 0x55, 0xa7, 0xa0, 0x33, 0xcc, 0x85, 0x73, 0x47, 0xad, 0xd2, 0x1b,
-        //     0x4e, 0x30, 0x54, 0x4b, 0xd3, 0x2e, 0xe0, 0xc2, 0x21, 0xe6, 0x96, 0x82, 0x2a, 0x6, 0xe,
-        //     0xe2, 0xa, 0xda,
-        // ];
-        println!(
-            "seed: [{}]",
-            seed.iter().map(|h| format!("{:#x}", h)).join(", ")
-        );
+        let seed = {
+            let mut rng: StdRng =
+                SeedableRng::from_rng(thread_rng()).expect("failure lifting thread_rng to StdRng");
+            let seed: [u8; 32] = rng.gen();
+            // let seed = [
+            //     0x19, 0xba, 0xc1, 0x55, 0xa7, 0xa0, 0x33, 0xcc, 0x85, 0x73, 0x47, 0xad, 0xd2, 0x1b,
+            //     0x4e, 0x30, 0x54, 0x4b, 0xd3, 0x2e, 0xe0, 0xc2, 0x21, 0xe6, 0x96, 0x82, 0x2a, 0x6, 0xe,
+            //     0xe2, 0xa, 0xda,
+            // ];
+            println!(
+                "seed: [{}]",
+                seed.iter().map(|h| format!("{:#x}", h)).join(", ")
+            );
+            seed
+        };
         let mut rng: StdRng = SeedableRng::from_seed(seed);

         // We need the global state to construct a transaction. This global state
@@ -669,11 +671,14 @@ mod tests {
         let (mut block_2, _, _) =
             make_mock_block(&block_1, None, premine_receiver_address, rng.gen());
         block_2
-            .accumulate_transaction(tx_by_preminer, &block_1.kernel.body.mutator_set_accumulator);
+            .accumulate_transaction(tx_by_preminer, &block_1.kernel.body.mutator_set_accumulator)
+            .await;

         // Update the mempool with block 2 and verify that the mempool now only contains one tx
         assert_eq!(2, mempool.len());
-        mempool.update_with_block(block_1.kernel.body.mutator_set_accumulator, &block_2);
+        mempool
+            .update_with_block(block_1.kernel.body.mutator_set_accumulator, &block_2)
+            .await;
         assert_eq!(1, mempool.len());

         // Create a new block to verify that the non-mined transaction contains
@@ -713,10 +718,12 @@ mod tests {
             "tx_by_other_updated has mutator set hash: {}",
             tx_by_other_updated.kernel.mutator_set_hash.emojihash()
         );
-        block_3_with_updated_tx.accumulate_transaction(
-            tx_by_other_updated.clone(),
-            &block_2.kernel.body.mutator_set_accumulator,
-        );
+        block_3_with_updated_tx
+            .accumulate_transaction(
+                tx_by_other_updated.clone(),
+                &block_2.kernel.body.mutator_set_accumulator,
+            )
+            .await;
         now = block_2.kernel.header.timestamp;
         assert!(
             block_3_with_updated_tx.is_valid(&block_2, now + seven_months),
@@ -730,10 +737,12 @@ mod tests {
         for _ in 0..10 {
             let (next_block, _, _) =
                 make_mock_block(&previous_block, None, other_receiver_address, rng.gen());
-            mempool.update_with_block(
-                previous_block.kernel.body.mutator_set_accumulator,
-                &next_block,
-            );
+            mempool
+                .update_with_block(
+                    previous_block.kernel.body.mutator_set_accumulator,
+                    &next_block,
+                )
+                .await;
             previous_block = next_block;
         }

@@ -741,20 +750,24 @@ mod tests {
             make_mock_block(&previous_block, None, other_receiver_address, rng.gen());
         assert_eq!(Into::<BlockHeight>::into(14), block_14.kernel.header.height);
         tx_by_other_updated = mempool.get_transactions_for_block(usize::MAX)[0].clone();
-        block_14.accumulate_transaction(
-            tx_by_other_updated,
-            &previous_block.kernel.body.mutator_set_accumulator,
-        );
+        block_14
+            .accumulate_transaction(
+                tx_by_other_updated,
+                &previous_block.kernel.body.mutator_set_accumulator,
+            )
+            .await;
         now = previous_block.kernel.header.timestamp;
         assert!(
             block_14.is_valid(&previous_block, now + seven_months),
             "Block with tx with updated mutator set data must be valid after 10 blocks have been mined"
         );

-        mempool.update_with_block(
-            previous_block.kernel.body.mutator_set_accumulator,
-            &block_14,
-        );
+        mempool
+            .update_with_block(
+                previous_block.kernel.body.mutator_set_accumulator,
+                &block_14,
+            )
+            .await;

         assert!(
             mempool.is_empty(),
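The mempool test above moves seed generation into its own block that prints the seed before handing it to a deterministic `StdRng`, so a failing run can be replayed by pasting the printed bytes into the commented-out array. A minimal sketch of the same reproducible-seed idiom (assuming the `rand` 0.8 API):

```rust
use rand::{rngs::StdRng, Rng, SeedableRng};

fn main() {
    // Draw a fresh seed, log it for reproducibility, then build the
    // deterministic RNG the test actually uses from that seed.
    let seed = {
        let mut rng = rand::thread_rng();
        let seed: [u8; 32] = rng.gen();
        println!("seed: {seed:?}");
        seed
    };
    let mut rng: StdRng = SeedableRng::from_seed(seed);
    let _sample: u64 = rng.gen();
}
```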
diff --git a/src/models/state/mod.rs b/src/models/state/mod.rs
index a2d4a36d..7f23c829 100644
--- a/src/models/state/mod.rs
+++ b/src/models/state/mod.rs
@@ -2,6 +2,10 @@ use crate::models::consensus::mast_hash::MastHash;
 use crate::prelude::twenty_first;

 use crate::util_types::mutator_set::mutator_set_accumulator::MutatorSetAccumulator;
+use crate::database::storage::storage_schema::traits::StorageWriter as SW;
+use crate::database::storage::storage_vec::traits::*;
+use crate::database::storage::storage_vec::Index;
+use crate::util_types::mutator_set::commit;
 use anyhow::{bail, Result};
 use itertools::Itertools;
 use num_traits::CheckedSub;
@@ -10,10 +14,7 @@ use std::ops::{Deref, DerefMut};
 use tracing::{debug, info, warn};
 use twenty_first::shared_math::bfield_codec::BFieldCodec;
 use twenty_first::shared_math::digest::Digest;
-use twenty_first::storage::storage_schema::traits::*;
-use twenty_first::storage::storage_vec::traits::*;
 use twenty_first::util_types::algebraic_hasher::AlgebraicHasher;
-use twenty_first::util_types::mmr::mmr_trait::Mmr;

 use self::blockchain_state::BlockchainState;
 use self::mempool::Mempool;
@@ -37,15 +38,14 @@ use super::blockchain::type_scripts::TypeScript;
 use super::consensus::tasm::program::ConsensusProgram;
 use super::consensus::timestamp::Timestamp;
 use crate::config_models::cli_args;
+use crate::locks::tokio as sync_tokio;
 use crate::models::peer::HandshakeData;
 use crate::models::state::wallet::monitored_utxo::MonitoredUtxo;
 use crate::models::state::wallet::utxo_notification_pool::ExpectedUtxo;
 use crate::time_fn_call_async;
 use crate::util_types::mutator_set::addition_record::AdditionRecord;
 use crate::util_types::mutator_set::ms_membership_proof::MsMembershipProof;
-use crate::util_types::mutator_set::mutator_set_trait::{commit, MutatorSet};
 use crate::util_types::mutator_set::removal_record::RemovalRecord;
-use crate::util_types::sync::tokio as sync_tokio;
 use crate::{Hash, VERSION};

@@ -57,11 +57,11 @@ pub mod networking_state;
 pub mod shared;
 pub mod wallet;

-/// `GlobalStateLock` holds a [`tokio::AtomicRw`](crate::util_types::sync::tokio::AtomicRw)
+/// `GlobalStateLock` holds a [`tokio::AtomicRw`](crate::locks::tokio::AtomicRw)
 /// ([`RwLock`](std::sync::RwLock)) over [`GlobalState`].
 ///
 /// Conceptually** all reads and writes of application state
-/// require acuiring this lock.
+/// require acquiring this lock.
 ///
 /// Having a single lock is useful for a few reasons:
 ///  1. Enables write serialization over all application state.
@@ -79,10 +79,8 @@ pub mod wallet;
 /// Readers do not block eachother.  Only a writer blocks readers.
 /// See [`RwLock`](std::sync::RwLock) docs for details.
 ///
-/// ** At the present time, storage types in twenty_first::storage
-/// implement their own locking, which means they can be mutated
-/// without acquiring the `GlobalStateLock`.  This may change in
-/// the future.
+/// ** unless some type uses interior mutability.  We have made
+/// efforts to eradicate interior mutability in this crate.
 ///
 /// Usage conventions:
 ///
@@ -270,7 +268,9 @@ impl GlobalState {
     pub async fn get_wallet_status_for_tip(&self) -> WalletStatus {
         let tip_digest = self.chain.light_state().hash();

-        self.wallet_state.get_wallet_status_from_lock(tip_digest)
+        self.wallet_state
+            .get_wallet_status_from_lock(tip_digest)
+            .await
     }

     pub async fn get_latest_balance_height(&self) -> Option<BlockHeight> {
@@ -300,7 +300,7 @@ impl GlobalState {
         let current_tip_digest = self.chain.light_state().hash();
         let monitored_utxos = self.wallet_state.wallet_db.monitored_utxos();

-        if monitored_utxos.is_empty() {
+        if monitored_utxos.is_empty().await {
             return None;
         }

@@ -314,7 +314,17 @@ impl GlobalState {
         //
         // We then continue working backward through all entries to
         // determine max(spent_in_block)
-        for (_i, mutxo) in monitored_utxos.many_iter((0..monitored_utxos.len()).rev()) {
+
+        // note: Stream trait does not have a way to reverse, so instead
+        // of stream_values() we use stream_many_values() and supply
+        // an iterator of indexes that are already reversed.
+
+        let stream = monitored_utxos
+            .stream_many_values((0..monitored_utxos.len().await).rev())
+            .await;
+        pin_mut!(stream); // needed for iteration
+
+        while let Some(mutxo) = stream.next().await {
             if max_confirmed_in_block.is_none() {
                 if let Some((.., confirmed_in_block)) = mutxo.confirmed_in_block {
                     if mutxo
@@ -349,7 +359,10 @@ impl GlobalState {
         // let num_monitored_utxos = monitored_utxos.len();
         let mut history = vec![];
-        for (_idx, monitored_utxo) in monitored_utxos.iter() {
+
+        let stream = monitored_utxos.stream_values().await;
+        pin_mut!(stream); // needed for iteration
+        while let Some(monitored_utxo) = stream.next().await {
             if monitored_utxo
                 .get_membership_proof_for_block(current_tip_digest)
                 .is_none()
@@ -405,9 +418,7 @@ impl GlobalState {
     ) -> Vec<RemovalRecord> {
         let mut inputs: Vec<RemovalRecord> = vec![];
         for (spendable_utxo, _lock_script, mp) in spendable_utxos_and_mps.iter() {
-            let removal_record = mutator_set_accumulator
-                .kernel
-                .drop(Hash::hash(spendable_utxo), mp);
+            let removal_record = mutator_set_accumulator.drop(Hash::hash(spendable_utxo), mp);
             inputs.push(removal_record);
         }
         inputs
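From here on, monitored-UTXO reads go through async `Stream`s instead of sync iterators: each loop pins the stream with `pin_mut!` and drains it with `StreamExt::next`, and reverse traversal is emulated by feeding pre-reversed indices to `stream_many_values`. A self-contained sketch of that loop shape using the `futures` crate (a plain vector stands in for `DbtVec` here):

```rust
use futures::{pin_mut, stream, StreamExt};

#[tokio::main]
async fn main() {
    // Stand-in for DbtVec::stream_many_values: a stream driven by an
    // already-reversed index iterator, since Stream has no `rev()`.
    let values = vec![10u64, 20, 30];
    let stream = stream::iter((0..values.len()).rev().map(|i| values[i]));
    pin_mut!(stream); // streams must be pinned before calling `next()`

    let mut seen = vec![];
    while let Some(v) = stream.next().await {
        seen.push(v);
    }
    assert_eq!(seen, vec![30, 20, 10]);
}
```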
@@ -697,7 +708,7 @@ impl GlobalState {

         assert_eq!(
             tip_hash,
-            ams_ref.get_sync_label(),
+            ams_ref.get_sync_label().await,
             "Archival mutator set must be synced to tip for successful MUTXO recovery"
         );

@@ -710,26 +721,36 @@ impl GlobalState {
         // monitored UTXO in the database. All monitored UTXOs are fetched outside
         // of the loop to avoid DB access/IO inside the loop.
         let mut recovery_data_for_missing_mutxos = vec![];
-        let mutxos = self.wallet_state.wallet_db.monitored_utxos().get_all();
-        '_outer: for incoming_utxo in incoming_utxos.into_iter() {
-            'inner: for monitored_utxo in mutxos.iter() {
-                if monitored_utxo.utxo == incoming_utxo.utxo {
-                    let msmp_res = monitored_utxo.get_latest_membership_proof_entry();
-                    let msmp = match msmp_res {
-                        Some((_blockh_hash, msmp_val)) => msmp_val,
-                        None => continue 'inner,
-                    };
-
-                    // If UTXO matches, then check if the AOCL index is also a match.
-                    // If it is, then the UTXO is already in the wallet database.
-                    if msmp.auth_path_aocl.leaf_index == incoming_utxo.aocl_index {
-                        continue '_outer;
+
+        {
+            let stream = self
+                .wallet_state
+                .wallet_db
+                .monitored_utxos()
+                .stream_values()
+                .await;
+            pin_mut!(stream); // needed for iteration
+
+            '_outer: for incoming_utxo in incoming_utxos.into_iter() {
+                'inner: while let Some(monitored_utxo) = stream.next().await {
+                    if monitored_utxo.utxo == incoming_utxo.utxo {
+                        let msmp_res = monitored_utxo.get_latest_membership_proof_entry();
+                        let msmp = match msmp_res {
+                            Some((_blockh_hash, msmp_val)) => msmp_val,
+                            None => continue 'inner,
+                        };
+
+                        // If UTXO matches, then check if the AOCL index is also a match.
+                        // If it is, then the UTXO is already in the wallet database.
+                        if msmp.auth_path_aocl.leaf_index == incoming_utxo.aocl_index {
+                            continue '_outer;
+                        }
                     }
                 }
-            }

-            // If no match is found, add the UTXO to the list of missing UTXOs
-            recovery_data_for_missing_mutxos.push(incoming_utxo);
+                // If no match is found, add the UTXO to the list of missing UTXOs
+                recovery_data_for_missing_mutxos.push(incoming_utxo);
+            }
         }

         if recovery_data_for_missing_mutxos.is_empty() {
@@ -746,7 +767,7 @@ impl GlobalState {
             "Attempting to restore {} missing monitored UTXOs to wallet database",
             recovery_data_for_missing_mutxos.len()
         );
-        let current_aocl_leaf_count = ams_ref.ams().kernel.aocl.count_leaves();
+        let current_aocl_leaf_count = ams_ref.ams().aocl.count_leaves().await;
         let mut restored_mutxos = 0;
         for incoming_utxo in recovery_data_for_missing_mutxos {
             // If the referenced UTXO is in the future from our tip, do not attempt to recover it. Instead: warn the user of this.
@@ -755,16 +776,19 @@ impl GlobalState {
                 continue;
             }
             let ms_item = Hash::hash(&incoming_utxo.utxo);
-            let restored_msmp_res = ams_ref.ams().restore_membership_proof(
-                ms_item,
-                incoming_utxo.sender_randomness,
-                incoming_utxo.receiver_preimage,
-                incoming_utxo.aocl_index,
-            );
+            let restored_msmp_res = ams_ref
+                .ams()
+                .restore_membership_proof(
+                    ms_item,
+                    incoming_utxo.sender_randomness,
+                    incoming_utxo.receiver_preimage,
+                    incoming_utxo.aocl_index,
+                )
+                .await;
             let restored_msmp = match restored_msmp_res {
                 Ok(msmp) => {
                     // Verify that the restored MSMP is valid
-                    if !ams_ref.ams().verify(ms_item, &msmp) {
+                    if !ams_ref.ams().verify(ms_item, &msmp).await {
                         warn!("Restored MSMP is invalid. Skipping restoration of UTXO with AOCL index {}. Maybe this UTXO is on an abandoned chain?", incoming_utxo.aocl_index);
                         continue;
                     }
@@ -781,11 +805,12 @@ impl GlobalState {
             self.wallet_state
                 .wallet_db
                 .monitored_utxos_mut()
-                .push(restored_mutxo);
+                .push(restored_mutxo)
+                .await;
             restored_mutxos += 1;
         }

-        self.wallet_state.wallet_db.persist();
+        self.wallet_state.wallet_db.persist().await;
         info!("Successfully restored {restored_mutxos} monitored UTXOs to wallet database");

         Ok(())
@@ -800,11 +825,9 @@ impl GlobalState {
         // loop over all monitored utxos
         let monitored_utxos = self.wallet_state.wallet_db.monitored_utxos_mut();

-        // note: iter_mut_lock holds a write-lock, so it should be dropped
-        // immediately after use.
-        let mut iter_mut_lock = monitored_utxos.iter_mut();
-        'outer: while let Some(mut setter) = iter_mut_lock.next() {
-            let monitored_utxo = setter.value();
+        'outer: for i in 0..monitored_utxos.len().await {
+            let i = i as Index;
+            let monitored_utxo = monitored_utxos.get(i).await;

             // Ignore those MUTXOs that were marked as abandoned
             if monitored_utxo.abandoned_at.is_some() {
@@ -818,7 +841,7 @@ impl GlobalState {

             debug!(
                 "Resyncing monitored UTXO number {}, with hash {}",
-                setter.index(),
+                i,
                 Hash::hash(&monitored_utxo.utxo)
             );
@@ -959,13 +982,14 @@ impl GlobalState {

             // store updated membership proof
             monitored_utxo.add_membership_proof_for_tip(tip_hash, membership_proof);
-            setter.set(monitored_utxo);
+
+            // update storage.
+            monitored_utxos.set(i, monitored_utxo).await
         }
-        drop(iter_mut_lock); // <---- releases write lock.

         // Update sync label and persist
-        self.wallet_state.wallet_db.set_sync_label(tip_hash);
-        self.wallet_state.wallet_db.persist();
+        self.wallet_state.wallet_db.set_sync_label(tip_hash).await;
+        self.wallet_state.wallet_db.persist().await;

         Ok(())
     }
@@ -999,17 +1023,13 @@ impl GlobalState {
             current_tip_header.height,
         );

-        let mut updates = std::collections::BTreeMap::new();
+        let monitored_utxos = self.wallet_state.wallet_db.monitored_utxos_mut();
+        let mut removed_count = 0;

         // Find monitored_utxo for updating
-        for (i, mut mutxo) in self
-            .wallet_state
-            .wallet_db
-            .monitored_utxos()
-            .get_all()
-            .into_iter()
-            .enumerate()
-        {
+        for i in 0..monitored_utxos.len().await {
+            let mut mutxo = monitored_utxos.get(i).await;
+
             // 1. Spent MUTXOs are not marked as abandoned, as there's no reason to maintain them
             //    once the spending block is buried sufficiently deep
             // 2. If synced to current tip, there is nothing more to do with this MUTXO
@@ -1034,25 +1054,18 @@ impl GlobalState {
                 if abandoned {
                     mutxo.abandoned_at = Some(current_tip_info);
-                    updates.insert(i as u64, mutxo);
+                    monitored_utxos.set(i, mutxo).await;
+                    removed_count += 1;
                 }
             }
         }

-        let removed_count = updates.iter().len();
-
-        // apply updates
-        self.wallet_state
-            .wallet_db
-            .monitored_utxos_mut()
-            .set_many(updates);
-
         Ok(removed_count)
     }

     pub async fn flush_databases(&mut self) -> Result<()> {
         // flush wallet databases
-        self.wallet_state.wallet_db.persist();
+        self.wallet_state.wallet_db.persist().await;

         // flush block_index database
         self.chain.archival_state_mut().block_index_db.flush().await;

@@ -1062,12 +1075,14 @@ impl GlobalState {
         self.chain
             .archival_state_mut()
             .archival_mutator_set
-            .set_sync_label(hash);
+            .set_sync_label(hash)
+            .await;

         self.chain
             .archival_state_mut()
             .archival_mutator_set
-            .persist();
+            .persist()
+            .await;

         // flush peer_standings
         self.net.peer_databases.peer_standings.flush().await;
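The resync and prune rewrites above drop the lock-holding `iter_mut()` in favor of an index loop: `get(i).await`, mutate locally, `set(i, v).await`, so no storage lock spans the whole pass. A toy sketch of that access pattern (the `AsyncVec` type below is invented for illustration):

```rust
// Sketch of the get/mutate/set loop, with a toy async vector standing in for
// the wallet's DbtVec. Each await touches storage briefly; no lock is held
// across the whole iteration.
struct AsyncVec<T>(Vec<T>);

impl<T: Clone> AsyncVec<T> {
    async fn len(&self) -> u64 {
        self.0.len() as u64
    }
    async fn get(&self, i: u64) -> T {
        self.0[i as usize].clone()
    }
    async fn set(&mut self, i: u64, v: T) {
        self.0[i as usize] = v;
    }
}

#[tokio::main]
async fn main() {
    let mut mutxos = AsyncVec(vec![1u64, 2, 3]);
    for i in 0..mutxos.len().await {
        let mut value = mutxos.get(i).await;
        value *= 10; // stand-in for updating a membership proof
        mutxos.set(i, value).await;
    }
    assert_eq!(mutxos.0, vec![10, 20, 30]);
}
```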
@@ -1097,58 +1112,84 @@ impl GlobalState {
         new_block: Block,
         coinbase_utxo_info: Option<ExpectedUtxo>,
     ) -> Result<()> {
-        // get proof_of_work_family for tip
-        let tip_proof_of_work_family = self.chain.light_state().kernel.header.proof_of_work_family;
-        let previous_mutator_set_accumulator = self
-            .chain
-            .light_state()
-            .kernel
-            .body
-            .mutator_set_accumulator
-            .clone();
+        // note: we make this fn internal so we can log its duration and ensure it will
+        // never be called directly by another fn, without the timings.
+
+        async fn store_block_internal_worker(
+            myself: &mut GlobalState,
+            new_block: Block,
+            coinbase_utxo_info: Option<ExpectedUtxo>,
+        ) -> Result<()> {
+            // get proof_of_work_family for tip
+            let tip_proof_of_work_family = myself
+                .chain
+                .light_state()
+                .kernel
+                .header
+                .proof_of_work_family;
+            let previous_mutator_set_accumulator = myself
+                .chain
+                .light_state()
+                .kernel
+                .body
+                .mutator_set_accumulator
+                .clone();

-        // Apply the updates
-        self.chain
-            .archival_state_mut()
-            .write_block(&new_block, Some(tip_proof_of_work_family))
-            .await?;
+            // Apply the updates
+            myself
+                .chain
+                .archival_state_mut()
+                .write_block(&new_block, Some(tip_proof_of_work_family))
+                .await?;

-        // update the mutator set with the UTXOs from this block
-        self.chain
-            .archival_state_mut()
-            .update_mutator_set(&new_block)
-            .await
-            .expect("Updating mutator set must succeed");
+            // update the mutator set with the UTXOs from this block
+            myself
+                .chain
+                .archival_state_mut()
+                .update_mutator_set(&new_block)
+                .await
+                .expect("Updating mutator set must succeed");

-        if let Some(coinbase_info) = coinbase_utxo_info {
-            // Notify wallet to expect the coinbase UTXO, as we mined this block
-            self.wallet_state
-                .expected_utxos
-                .add_expected_utxo(
-                    coinbase_info.utxo,
-                    coinbase_info.sender_randomness,
-                    coinbase_info.receiver_preimage,
-                    UtxoNotifier::OwnMiner,
-                )
-                .expect("UTXO notification from miner must be accepted");
-        }
+            if let Some(coinbase_info) = coinbase_utxo_info {
+                // Notify wallet to expect the coinbase UTXO, as we mined this block
+                myself
+                    .wallet_state
+                    .expected_utxos
+                    .add_expected_utxo(
+                        coinbase_info.utxo,
+                        coinbase_info.sender_randomness,
+                        coinbase_info.receiver_preimage,
+                        UtxoNotifier::OwnMiner,
+                    )
+                    .expect("UTXO notification from miner must be accepted");
+            }

-        // update wallet state with relevant UTXOs from this block
-        self.wallet_state
-            .update_wallet_state_with_new_block(&previous_mutator_set_accumulator, &new_block)
-            .await?;
+            // update wallet state with relevant UTXOs from this block
+            myself
+                .wallet_state
+                .update_wallet_state_with_new_block(&previous_mutator_set_accumulator, &new_block)
+                .await?;

-        // Update mempool with UTXOs from this block. This is done by removing all transaction
-        // that became invalid/was mined by this block.
-        self.mempool
-            .update_with_block(previous_mutator_set_accumulator, &new_block);
+            // Update mempool with UTXOs from this block. This is done by removing all transaction
+            // that became invalid/was mined by this block.
+            myself
+                .mempool
+                .update_with_block(previous_mutator_set_accumulator, &new_block)
+                .await;

-        self.chain.light_state_mut().set_block(new_block);
+            myself.chain.light_state_mut().set_block(new_block);

-        // Flush databases
-        self.flush_databases().await?;
+            // Flush databases
+            myself.flush_databases().await?;

-        Ok(())
+            Ok(())
+        }
+
+        crate::macros::duration_async_info!(store_block_internal_worker(
+            self,
+            new_block,
+            coinbase_utxo_info
+        ))
     }

     /// resync membership proofs
@@ -1207,7 +1248,7 @@ mod global_state_tests {
         tip_block: &Block,
     ) -> bool {
         let monitored_utxos = wallet_state.wallet_db.monitored_utxos();
-        for (_idx, monitored_utxo) in monitored_utxos.iter() {
+        for monitored_utxo in monitored_utxos.get_all().await.iter() {
             let current_mp = monitored_utxo.get_membership_proof_for_block(tip_block.hash());

             match current_mp {
@@ -1316,7 +1357,8 @@ mod global_state_tests {
             .wallet_state
             .wallet_db
             .monitored_utxos()
-            .get_all();
+            .get_all()
+            .await;
         assert_ne!(monitored_utxos.len(), 0);

         // one month before release date, we should not be able to create the transaction
@@ -1436,13 +1478,13 @@ mod global_state_tests {
         {
             let monitored_utxos = global_state.wallet_state.wallet_db.monitored_utxos_mut();
             assert!(
-                monitored_utxos.len().is_one(),
+                monitored_utxos.len().await.is_one(),
                 "MUTXO must have genesis element before emptying it"
             );
-            monitored_utxos.pop();
+            monitored_utxos.pop().await;

             assert!(
-                monitored_utxos.is_empty(),
+                monitored_utxos.is_empty().await,
                 "MUTXO must be empty after emptying it"
             );
         }
@@ -1455,12 +1497,12 @@ mod global_state_tests {
         {
             let monitored_utxos = global_state.wallet_state.wallet_db.monitored_utxos();
             assert!(
-                monitored_utxos.len().is_one(),
+                monitored_utxos.len().await.is_one(),
                 "MUTXO must have genesis element after recovering it"
            );

             // Verify that the restored MUTXO has a valid MSMP
-            let own_premine_mutxo = monitored_utxos.get(0);
+            let own_premine_mutxo = monitored_utxos.get(0).await;
             let ms_item = Hash::hash(&own_premine_mutxo.utxo);
             global_state
                 .chain
@@ -1608,7 +1650,8 @@ mod global_state_tests {
         // Verify that wallet has monitored UTXOs, from genesis and from block_1a
         let wallet_status = global_state
             .wallet_state
-            .get_wallet_status_from_lock(mock_block_1a.hash());
+            .get_wallet_status_from_lock(mock_block_1a.hash())
+            .await;
         assert_eq!(2, wallet_status.synced_unspent.len());

         // Make a new fork from genesis that makes us lose the coinbase UTXO of block 1a
@@ -1648,7 +1691,8 @@ mod global_state_tests {
         // Verify that one MUTXO is unsynced, and that 1 (from genesis) is synced
         let wallet_status_after_forking = global_state
             .wallet_state
-            .get_wallet_status_from_lock(parent_block.hash());
+            .get_wallet_status_from_lock(parent_block.hash())
+            .await;
         assert_eq!(1, wallet_status_after_forking.synced_unspent.len());
         assert_eq!(1, wallet_status_after_forking.unsynced_unspent.len());

@@ -1658,6 +1702,7 @@ mod global_state_tests {
         assert!(
             !monitored_utxos
                 .get(0)
+                .await
                 .was_abandoned(
                     parent_block.kernel.mast_hash(),
                     global_state.chain.archival_state()
                 )
@@ -1667,6 +1712,7 @@ mod global_state_tests {
         assert!(
             monitored_utxos
                 .get(1)
+                .await
                 .was_abandoned(
                     parent_block.kernel.mast_hash(),
                     global_state.chain.archival_state()
                 )
@@ -1729,7 +1775,8 @@ mod global_state_tests {
         // Verify that UTXO was recorded
         let wallet_status_after_1a = global_state
             .wallet_state
-            .get_wallet_status_from_lock(mock_block_1a.hash());
+            .get_wallet_status_from_lock(mock_block_1a.hash())
+            .await;
         assert_eq!(2, wallet_status_after_1a.synced_unspent.len());
     }
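`store_block_internal` above is restructured so the real work lives in an inner worker fn invoked through `crate::macros::duration_async_info!`, guaranteeing every call path gets timed. The crate's actual macro definition is not shown in this diff; the sketch below is one plausible shape for such a macro, not the real implementation:

```rust
// Illustrative version of a duration-logging macro for async calls; the real
// crate::macros::duration_async_info! may differ.
macro_rules! duration_async_info {
    ($fn_name:ident ( $($arg:expr),* $(,)? )) => {{
        let start = std::time::Instant::now();
        let result = $fn_name($($arg),*).await;
        println!("{} took {:?}", stringify!($fn_name), start.elapsed());
        result
    }};
}

async fn worker(x: u64) -> u64 {
    x + 1
}

#[tokio::main]
async fn main() {
    let y = duration_async_info!(worker(41));
    assert_eq!(y, 42);
}
```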
@@ -1760,7 +1807,8 @@ mod global_state_tests {
         // Verify that all both MUTXOs have synced MPs
         let wallet_status_on_a_fork = global_state
             .wallet_state
-            .get_wallet_status_from_lock(fork_a_block.hash());
+            .get_wallet_status_from_lock(fork_a_block.hash())
+            .await;
         assert_eq!(2, wallet_status_on_a_fork.synced_unspent.len());

@@ -1791,7 +1839,8 @@ mod global_state_tests {
         // Verify that there are zero MUTXOs with synced MPs
         let wallet_status_on_b_fork_before_resync = global_state
             .wallet_state
-            .get_wallet_status_from_lock(fork_b_block.hash());
+            .get_wallet_status_from_lock(fork_b_block.hash())
+            .await;
         assert_eq!(
             0,
             wallet_status_on_b_fork_before_resync.synced_unspent.len()
@@ -1808,7 +1857,8 @@ mod global_state_tests {
             .unwrap();
         let wallet_status_on_b_fork_after_resync = global_state
             .wallet_state
-            .get_wallet_status_from_lock(fork_b_block.hash());
+            .get_wallet_status_from_lock(fork_b_block.hash())
+            .await;
         assert_eq!(2, wallet_status_on_b_fork_after_resync.synced_unspent.len());
         assert_eq!(
             0,
@@ -1844,7 +1894,8 @@ mod global_state_tests {
         // Verify that there are zero MUTXOs with synced MPs
         let wallet_status_on_c_fork_before_resync = global_state
             .wallet_state
-            .get_wallet_status_from_lock(fork_c_block.hash());
+            .get_wallet_status_from_lock(fork_c_block.hash())
+            .await;
         assert_eq!(
             0,
             wallet_status_on_c_fork_before_resync.synced_unspent.len()
@@ -1862,7 +1913,8 @@ mod global_state_tests {
             .unwrap();
         let wallet_status_on_c_fork_after_resync = global_state
             .wallet_state
-            .get_wallet_status_from_lock(fork_c_block.hash());
+            .get_wallet_status_from_lock(fork_c_block.hash())
+            .await;
         assert_eq!(1, wallet_status_on_c_fork_after_resync.synced_unspent.len());
         assert_eq!(
             1,
@@ -1874,6 +1926,7 @@ mod global_state_tests {
         assert!(
             !monitored_utxos
                 .get(0)
+                .await
                 .was_abandoned(
                     fork_c_block.kernel.mast_hash(),
                     global_state.chain.archival_state()
                 )
@@ -1883,6 +1936,7 @@ mod global_state_tests {
         assert!(
             monitored_utxos
                 .get(1)
+                .await
                 .was_abandoned(
                     fork_c_block.kernel.mast_hash(),
                     global_state.chain.archival_state()
                 )
@@ -1895,18 +1949,21 @@ mod global_state_tests {
     #[tokio::test]
     async fn flaky_mutator_set_test() {
-        let mut rng: StdRng =
-            SeedableRng::from_rng(thread_rng()).expect("failure lifting thread_rng to StdRng");
-        let seed: [u8; 32] = rng.gen();
-        // let seed = [
-        //     0xf4, 0xc2, 0x1c, 0xd0, 0x5a, 0xac, 0x99, 0xe7, 0x3a, 0x1e, 0x29, 0x7f, 0x16, 0xc1,
-        //     0x50, 0x5e, 0x1e, 0xd, 0x4b, 0x49, 0x51, 0x9c, 0x1b, 0xa0, 0x38, 0x3c, 0xd, 0x83, 0x29,
-        //     0xdb, 0xab, 0xe2,
-        // ];
-        println!(
-            "seed: [{}]",
-            seed.iter().map(|h| format!("{:#x}", h)).join(", ")
-        );
+        let seed = {
+            let mut rng: StdRng =
+                SeedableRng::from_rng(thread_rng()).expect("failure lifting thread_rng to StdRng");
+            let seed: [u8; 32] = rng.gen();
+            // let seed = [
+            //     0xf4, 0xc2, 0x1c, 0xd0, 0x5a, 0xac, 0x99, 0xe7, 0x3a, 0x1e, 0x29, 0x7f, 0x16, 0xc1,
+            //     0x50, 0x5e, 0x1e, 0xd, 0x4b, 0x49, 0x51, 0x9c, 0x1b, 0xa0, 0x38, 0x3c, 0xd, 0x83, 0x29,
+            //     0xdb, 0xab, 0xe2,
+            // ];
+            println!(
+                "seed: [{}]",
+                seed.iter().map(|h| format!("{:#x}", h)).join(", ")
+            );
+            seed
+        };
         let mut rng: StdRng = SeedableRng::from_seed(seed);

         // Test various parts of the state update when a block contains multiple inputs and outputs
@@ -1997,10 +2054,12 @@ mod global_state_tests {
             .unwrap();

         // Absorb and verify validity
-        block_1.accumulate_transaction(
-            tx_to_alice_and_bob,
-            &genesis_block.kernel.body.mutator_set_accumulator,
-        );
+        block_1
+            .accumulate_transaction(
+                tx_to_alice_and_bob,
+                &genesis_block.kernel.body.mutator_set_accumulator,
+            )
+            .await;
         let now = genesis_block.kernel.header.timestamp;
         assert!(block_1.is_valid(&genesis_block, now + seven_months));
     }
@@ -2051,7 +2110,7 @@ mod global_state_tests {
             .wallet_state
             .wallet_db
             .monitored_utxos()
-            .len(), "Genesis receiver must have 3 UTXOs after block 1: change from transaction, coinbase from block 1, and the spent premine UTXO"
+            .len().await, "Genesis receiver must have 3 UTXOs after block 1: change from transaction, coinbase from block 1, and the spent premine UTXO"
         );
     }
@@ -2198,11 +2257,15 @@ mod global_state_tests {
             genesis_spending_key.to_address(),
             rng.gen(),
         );
-        block_2.accumulate_transaction(tx_from_alice, &block_1.kernel.body.mutator_set_accumulator);
+        block_2
+            .accumulate_transaction(tx_from_alice, &block_1.kernel.body.mutator_set_accumulator)
+            .await;
         assert_eq!(2, block_2.kernel.body.transaction.kernel.inputs.len());
         assert_eq!(3, block_2.kernel.body.transaction.kernel.outputs.len());

-        block_2.accumulate_transaction(tx_from_bob, &block_1.kernel.body.mutator_set_accumulator);
+        block_2
+            .accumulate_transaction(tx_from_bob, &block_1.kernel.body.mutator_set_accumulator)
+            .await;
     }

     #[traced_test]
diff --git a/src/models/state/networking_state.rs b/src/models/state/networking_state.rs
index 03e7c917..db6e1eda 100644
--- a/src/models/state/networking_state.rs
+++ b/src/models/state/networking_state.rs
@@ -1,5 +1,5 @@
 use crate::config_models::data_directory::DataDirectory;
-use crate::database::{create_db_if_missing, NeptuneLevelDb};
+use crate::database::{create_db_if_missing, NeptuneLevelDb, WriteBatchAsync};
 use crate::models::database::PeerDatabases;
 use crate::models::peer::{self, PeerStanding};
 use anyhow::Result;
@@ -92,10 +92,12 @@ impl NetworkingState {
             .map(|(ip, _old_standing)| (ip, PeerStanding::default()))
             .collect();

-        self.peer_databases
-            .peer_standings
-            .batch_write(new_entries)
-            .await
+        let mut batch = WriteBatchAsync::new();
+        for (ip, standing) in new_entries.into_iter() {
+            batch.op_write(ip, standing);
+        }
+
+        self.peer_databases.peer_standings.batch_write(batch).await
     }

     // Storing IP addresses is, according to this answer, not a violation of GDPR:
diff --git a/src/models/state/wallet/address/generation_address.rs b/src/models/state/wallet/address/generation_address.rs
index c993b6d3..7471c9c7 100644
--- a/src/models/state/wallet/address/generation_address.rs
+++ b/src/models/state/wallet/address/generation_address.rs
@@ -1,4 +1,5 @@
 use crate::prelude::{triton_vm, twenty_first};
+use crate::util_types::mutator_set::commit;

 use aead::Aead;
 use aead::KeyInit;
@@ -29,7 +30,6 @@ use crate::models::blockchain::transaction::utxo::Utxo;
 use crate::models::blockchain::transaction::PublicAnnouncement;
 use crate::models::blockchain::transaction::Transaction;
 use crate::util_types::mutator_set::addition_record::AdditionRecord;
-use crate::util_types::mutator_set::mutator_set_trait::commit;

 pub const GENERATION_FLAG: BFieldElement = BFieldElement::new(79);
diff --git a/src/models/state/wallet/mod.rs b/src/models/state/wallet/mod.rs
index 1649c0e3..b2ac4dde 100644
--- a/src/models/state/wallet/mod.rs
+++ b/src/models/state/wallet/mod.rs
@@ -283,6 +283,7 @@ impl WalletSecret {
         use std::os::unix::prelude::OpenOptionsExt;
         fs::OpenOptions::new()
             .create(true)
+            .truncate(false)
             .write(true)
             .mode(0o600)
             .open(path)
@@ -295,6 +296,7 @@ impl WalletSecret {
     fn create_wallet_file_windows(path: &PathBuf, wallet_as_json: String) -> Result<()> {
         fs::OpenOptions::new()
             .create(true)
+            .truncate(false)
             .write(true)
             .open(path)
             .unwrap();
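The wallet-file hunks above add an explicit `.truncate(false)`: with `create(true)` and `write(true)`, leaving truncation unspecified is ambiguous (and flagged by newer clippy lints), and truncating here could clobber an existing wallet file. A minimal sketch of the open-or-create-without-clobbering idiom:

```rust
use std::fs;

fn main() -> std::io::Result<()> {
    // create(true) + write(true) opens or creates the file; truncate(false)
    // makes explicit that an existing file's contents are preserved.
    let _file = fs::OpenOptions::new()
        .create(true)
        .truncate(false)
        .write(true)
        .open("wallet_test.dat")?;
    fs::remove_file("wallet_test.dat")?;
    Ok(())
}
```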
@@ -344,13 +346,14 @@ impl WalletSecret {

 #[cfg(test)]
 mod wallet_tests {
+
+    use crate::database::storage::storage_vec::traits::*;
     use itertools::Itertools;
     use num_traits::CheckedSub;
     use rand::random;
     use tracing_test::traced_test;
     use twenty_first::shared_math::tip5::DIGEST_LENGTH;
     use twenty_first::shared_math::x_field_element::EXTENSION_DEGREE;
-    use twenty_first::storage::storage_vec::traits::*;

     use super::monitored_utxo::MonitoredUtxo;
     use super::wallet_state::WalletState;
@@ -369,11 +372,10 @@ mod wallet_tests {
         add_block, get_mock_global_state, get_mock_wallet_state, make_mock_block,
         make_mock_transaction_with_generation_key,
     };
-    use crate::util_types::mutator_set::mutator_set_trait::MutatorSet;

     async fn get_monitored_utxos(wallet_state: &WalletState) -> Vec<MonitoredUtxo> {
         // note: we could just return a DbtVec here and avoid cloning...
-        wallet_state.wallet_db.monitored_utxos().get_all()
+        wallet_state.wallet_db.monitored_utxos().get_all().await
     }

     #[tokio::test]
@@ -770,8 +772,11 @@ mod wallet_tests {
             receiver_data,
             NeptuneCoins::zero(),
             msa_tip_previous.clone(),
-        );
-        next_block.accumulate_transaction(tx, &msa_tip_previous);
+        )
+        .await;
+        next_block
+            .accumulate_transaction(tx, &msa_tip_previous)
+            .await;

         own_wallet_state
             .update_wallet_state_with_new_block(&msa_tip_previous.clone(), &next_block)
@@ -872,7 +877,9 @@ mod wallet_tests {
             .await
             .unwrap();

-        block_1.accumulate_transaction(valid_tx, &previous_msa);
+        block_1
+            .accumulate_transaction(valid_tx, &previous_msa)
+            .await;

         // Verify the validity of the merged transaction and block
         assert!(block_1.is_valid(&genesis_block, now + seven_months));
@@ -1001,7 +1008,9 @@ mod wallet_tests {
         );

         // Check that `WalletStatus` is returned correctly
-        let wallet_status = { own_wallet_state.get_wallet_status_from_lock(block_18.hash()) };
+        let wallet_status = own_wallet_state
+            .get_wallet_status_from_lock(block_18.hash())
+            .await;
         assert_eq!(
             19,
             wallet_status.synced_unspent.len(),
@@ -1133,10 +1142,12 @@ mod wallet_tests {
             .create_transaction(vec![receiver_data_six.clone()], NeptuneCoins::new(4), now)
             .await
             .unwrap();
-        block_3_b.accumulate_transaction(
-            tx_from_preminer,
-            &block_2_b.kernel.body.mutator_set_accumulator,
-        );
+        block_3_b
+            .accumulate_transaction(
+                tx_from_preminer,
+                &block_2_b.kernel.body.mutator_set_accumulator,
+            )
+            .await;
         assert!(
             block_3_b.is_valid(&block_2_b, now),
             "Block must be valid after accumulating txs"
diff --git a/src/models/state/wallet/rusty_wallet_database.rs b/src/models/state/wallet/rusty_wallet_database.rs
index da4d64f7..c45092e9 100644
--- a/src/models/state/wallet/rusty_wallet_database.rs
+++ b/src/models/state/wallet/rusty_wallet_database.rs
@@ -1,10 +1,12 @@
 use crate::prelude::twenty_first;

-use twenty_first::shared_math::tip5::Digest;
-use twenty_first::{
-    storage::level_db::DB,
-    storage::storage_schema::{traits::*, DbtSingleton, DbtVec, SimpleRustyStorage},
+use crate::database::{
+    storage::storage_schema::{
+        traits::*, DbtSingleton, DbtVec, RustyKey, RustyValue, SimpleRustyStorage,
+    },
+    NeptuneLevelDb,
 };
+use twenty_first::shared_math::tip5::Digest;

 use super::monitored_utxo::MonitoredUtxo;

@@ -21,18 +23,19 @@ pub struct RustyWalletDatabase {
 }

 impl RustyWalletDatabase {
-    pub fn connect(db: DB) -> Self {
+    pub async fn connect(db: NeptuneLevelDb<RustyKey, RustyValue>) -> Self {
         let mut storage = SimpleRustyStorage::new_with_callback(
             db,
             "RustyWalletDatabase-Schema",
             crate::LOG_LOCK_EVENT_CB,
         );

-        let monitored_utxos_storage = storage.schema.new_vec::<MonitoredUtxo>("monitored_utxos");
-        let sync_label_storage = storage.schema.new_singleton::<Digest>("sync_label");
-        let counter_storage = storage.schema.new_singleton::<u64>("counter");
-
-        storage.restore_or_new();
+        let monitored_utxos_storage = storage
+            .schema
+            .new_vec::<MonitoredUtxo>("monitored_utxos")
+            .await;
+        let sync_label_storage = storage.schema.new_singleton::<Digest>("sync_label").await;
+        let counter_storage = storage.schema.new_singleton::<u64>("counter").await;

         Self {
             storage,
@@ -53,29 +56,25 @@ impl RustyWalletDatabase {
     }

     /// Get the hash of the block to which this database is synced.
-    pub fn get_sync_label(&self) -> Digest {
-        self.sync_label.get()
+    pub async fn get_sync_label(&self) -> Digest {
+        self.sync_label.get().await
     }

-    pub fn set_sync_label(&mut self, sync_label: Digest) {
-        self.sync_label.set(sync_label);
+    pub async fn set_sync_label(&mut self, sync_label: Digest) {
+        self.sync_label.set(sync_label).await;
     }

-    pub fn get_counter(&self) -> u64 {
-        self.counter.get()
+    pub async fn get_counter(&self) -> u64 {
+        self.counter.get().await
     }

-    pub fn set_counter(&mut self, counter: u64) {
-        self.counter.set(counter);
+    pub async fn set_counter(&mut self, counter: u64) {
+        self.counter.set(counter).await;
     }
 }

 impl StorageWriter for RustyWalletDatabase {
-    fn persist(&mut self) {
-        self.storage.persist()
-    }
-
-    fn restore_or_new(&mut self) {
-        self.storage.restore_or_new()
+    async fn persist(&mut self) {
+        self.storage.persist().await
     }
 }
diff --git a/src/models/state/wallet/utxo_notification_pool.rs b/src/models/state/wallet/utxo_notification_pool.rs
index f98d3b47..4e4dee9e 100644
--- a/src/models/state/wallet/utxo_notification_pool.rs
+++ b/src/models/state/wallet/utxo_notification_pool.rs
@@ -1,4 +1,7 @@
-use crate::prelude::twenty_first;
+use crate::{
+    prelude::twenty_first,
+    util_types::mutator_set::{addition_record::AdditionRecord, commit},
+};

 use anyhow::{bail, Result};
 use bytesize::ByteSize;
@@ -13,15 +16,12 @@ use std::{
 use tracing::{error, info, warn};
 use twenty_first::{shared_math::tip5::Digest, util_types::algebraic_hasher::AlgebraicHasher};

-use crate::{
-    models::{
-        blockchain::{
-            shared::Hash,
-            transaction::{utxo::Utxo, Transaction},
-        },
-        peer::InstanceId,
-    },
-    util_types::mutator_set::{addition_record::AdditionRecord, mutator_set_trait::commit},
-};
+use crate::models::{
+    blockchain::{
+        shared::Hash,
+        transaction::{utxo::Utxo, Transaction},
+    },
+    peer::InstanceId,
+};

 pub type Credibility = i32;
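`RustyWalletDatabase::connect` above now awaits typed schema constructors (`new_vec::<MonitoredUtxo>`, `new_singleton::<Digest>`) and exposes async getters and setters. The toy `Singleton` below mimics that async get/set surface with a `tokio::sync::RwLock`; it illustrates the shape only and is not the crate's `DbtSingleton` implementation:

```rust
// Toy async singleton standing in for DbtSingleton<T>: reads and writes go
// through awaited calls instead of synchronous access.
use tokio::sync::RwLock;

struct Singleton<T: Clone + Default> {
    cell: RwLock<T>,
}

impl<T: Clone + Default> Singleton<T> {
    fn new() -> Self {
        Self { cell: RwLock::new(T::default()) }
    }
    async fn get(&self) -> T {
        self.cell.read().await.clone()
    }
    async fn set(&self, value: T) {
        *self.cell.write().await = value;
    }
}

#[tokio::main]
async fn main() {
    let counter: Singleton<u64> = Singleton::new();
    counter.set(7).await;
    assert_eq!(counter.get().await, 7);
}
```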
twenty_first::util_types::storage_vec::traits::*; use super::coin_with_possible_timelock::CoinWithPossibleTimeLock; use super::rusty_wallet_database::RustyWalletDatabase; @@ -37,7 +37,6 @@ use crate::models::state::wallet::monitored_utxo::MonitoredUtxo; use crate::util_types::mutator_set::addition_record::AdditionRecord; use crate::util_types::mutator_set::ms_membership_proof::MsMembershipProof; use crate::util_types::mutator_set::mutator_set_accumulator::MutatorSetAccumulator; -use crate::util_types::mutator_set::mutator_set_trait::MutatorSet; use crate::util_types::mutator_set::removal_record::{AbsoluteIndexSet, RemovalRecord}; use crate::Hash; @@ -166,10 +165,11 @@ impl WalletState { DataDirectory::create_dir_if_not_exists(&data_dir.wallet_database_dir_path()) .await .unwrap(); - let wallet_db = DB::open( + let wallet_db = NeptuneLevelDb::new( &data_dir.wallet_database_dir_path(), &crate::database::create_db_if_missing(), - ); + ) + .await; let wallet_db = match wallet_db { Ok(wdb) => wdb, Err(err) => { @@ -178,10 +178,8 @@ impl WalletState { } }; - let mut rusty_wallet_database = RustyWalletDatabase::connect(wallet_db); - rusty_wallet_database.restore_or_new(); - - let sync_label = rusty_wallet_database.get_sync_label(); + let rusty_wallet_database = RustyWalletDatabase::connect(wallet_db).await; + let sync_label = rusty_wallet_database.get_sync_label().await; let mut wallet_state = Self { wallet_db: rusty_wallet_database, @@ -230,7 +228,7 @@ impl WalletState { } /// Return a list of UTXOs spent by this wallet in the transaction - fn scan_for_spent_utxos( + async fn scan_for_spent_utxos( &self, transaction: &Transaction, ) -> Vec<(Utxo, AbsoluteIndexSet, u64)> { @@ -242,9 +240,12 @@ impl WalletState { .collect_vec(); let monitored_utxos = self.wallet_db.monitored_utxos(); - let mut spent_own_utxos = vec![]; - for (i, monitored_utxo) in monitored_utxos.iter() { + + let stream = monitored_utxos.stream().await; + pin_mut!(stream); // needed for iteration + + while let Some((i, monitored_utxo)) = stream.next().await { let abs_i = match monitored_utxo.get_latest_membership_proof_entry() { Some(msmp) => msmp.1.compute_indices(Hash::hash(&monitored_utxo.utxo)), None => continue, @@ -296,7 +297,7 @@ impl WalletState { let transaction: Transaction = new_block.kernel.body.transaction.clone(); let spent_inputs: Vec<(Utxo, AbsoluteIndexSet, u64)> = - self.scan_for_spent_utxos(&transaction); + self.scan_for_spent_utxos(&transaction).await; // utxo, sender randomness, receiver preimage, addition record let mut received_outputs: Vec<(AdditionRecord, Utxo, Digest, Digest)> = vec![]; @@ -327,7 +328,7 @@ impl WalletState { // block does not affect our balance if spent_inputs.is_empty() && addition_record_to_utxo_info.is_empty() - && monitored_utxos.is_empty() + && monitored_utxos.is_empty().await { return Ok(()); } @@ -338,40 +339,46 @@ impl WalletState { StrongUtxoKey, (MsMembershipProof, u64), > = HashMap::default(); - for (i, monitored_utxo) in monitored_utxos.iter() { - let utxo_digest = Hash::hash(&monitored_utxo.utxo); - match monitored_utxo - .get_membership_proof_for_block(new_block.kernel.header.prev_block_digest) - { - Some(ms_mp) => { - debug!("Found valid mp for UTXO"); - let replacement_success = valid_membership_proofs_and_own_utxo_count.insert( - StrongUtxoKey::new(utxo_digest, ms_mp.auth_path_aocl.leaf_index), - (ms_mp, i), - ); - assert!( - replacement_success.is_none(), - "Strong key must be unique in wallet DB" - ); - } - None => { - // Was MUTXO marked as abandoned? 
Then this is fine. Otherwise, log a warning. - // TODO: If MUTXO was spent, maybe we also don't want to maintain it? - if monitored_utxo.abandoned_at.is_some() { - debug!("Monitored UTXO with digest {utxo_digest} was marked as abandoned. Skipping."); - } else { - let confirmed_in_block_info = match monitored_utxo.confirmed_in_block { - Some(mutxo_received_in_block) => format!( - "UTXO was received at block height {}.", - mutxo_received_in_block.2 - ), - None => String::from("No info about when UTXO was confirmed."), - }; - warn!( - "Unable to find valid membership proof for UTXO with digest {utxo_digest}. {confirmed_in_block_info} Current block height is {}", new_block.kernel.header.height - ); - // panic!("Unable to find valid membership proof."); + { + let stream = monitored_utxos.stream().await; + pin_mut!(stream); // needed for iteration + + while let Some((i, monitored_utxo)) = stream.next().await { + let utxo_digest = Hash::hash(&monitored_utxo.utxo); + + match monitored_utxo + .get_membership_proof_for_block(new_block.kernel.header.prev_block_digest) + { + Some(ms_mp) => { + debug!("Found valid mp for UTXO"); + let replacement_success = valid_membership_proofs_and_own_utxo_count + .insert( + StrongUtxoKey::new(utxo_digest, ms_mp.auth_path_aocl.leaf_index), + (ms_mp, i), + ); + assert!( + replacement_success.is_none(), + "Strong key must be unique in wallet DB" + ); + } + None => { + // Was MUTXO marked as abandoned? Then this is fine. Otherwise, log a warning. + // TODO: If MUTXO was spent, maybe we also don't want to maintain it? + if monitored_utxo.abandoned_at.is_some() { + debug!("Monitored UTXO with digest {utxo_digest} was marked as abandoned. Skipping."); + } else { + let confirmed_in_block_info = match monitored_utxo.confirmed_in_block { + Some(mutxo_received_in_block) => format!( + "UTXO was received at block height {}.", + mutxo_received_in_block.2 + ), + None => String::from("No info about when UTXO was confirmed."), + }; + warn!( + "Unable to find valid membership proof for UTXO with digest {utxo_digest}. {confirmed_in_block_info} Current block height is {}", new_block.kernel.header.height + ); + } } } } @@ -404,7 +411,7 @@ impl WalletState { .map(|(mp, _index)| mp) .collect_vec(), &utxo_digests, - &msa_state.kernel, + &msa_state, addition_record, ); match updated_mp_indices { @@ -416,7 +423,7 @@ impl WalletState { } // Batch update removal records to keep them valid after next addition - RemovalRecord::batch_update_from_addition(&mut removal_records, &mut msa_state.kernel); + RemovalRecord::batch_update_from_addition(&mut removal_records, &msa_state); // If output UTXO belongs to us, add it to the list of monitored UTXOs and // add its membership proof to the list of managed membership proofs. 
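The wallet hunks above all apply one mechanical transformation: a synchronous `for (i, x) in v.iter()` loop over the monitored-UTXO storage becomes an async stream that is pinned before iteration. A minimal runnable sketch of that pattern, assuming the `futures` and `tokio` crates and using `futures::stream::iter` as a stand-in for the crate's own `monitored_utxos.stream().await` (the element type here is illustrative, not the real wallet type):

use futures::{pin_mut, stream, StreamExt};

#[tokio::main]
async fn main() {
    // Stand-in for `monitored_utxos.stream().await`, which likewise yields
    // (index, value) pairs.
    let stream = stream::iter(vec![(0u64, "mutxo-a"), (1u64, "mutxo-b")]);
    // Pin the stream before iterating; `StreamExt::next` needs an `Unpin`
    // handle, which streams returned from async storage methods generally
    // are not -- hence the `pin_mut!(stream)` calls in the diff above.
    pin_mut!(stream);

    while let Some((i, mutxo)) = stream.next().await {
        println!("monitored utxo {i}: {mutxo}");
    }
}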
@@ -449,7 +456,7 @@ impl WalletState { }; incoming_utxo_recovery_data_list.push(utxo_ms_recovery_data); - let mutxos_len = monitored_utxos.len(); + let mutxos_len = monitored_utxos.len().await; valid_membership_proofs_and_own_utxo_count.insert( StrongUtxoKey::new( @@ -466,7 +473,7 @@ impl WalletState { new_block.kernel.header.timestamp, new_block.kernel.header.height, )); - monitored_utxos.push(mutxo); + monitored_utxos.push(mutxo).await; } // Update mutator set to bring it to the correct state for the next call to batch-update @@ -474,18 +481,26 @@ impl WalletState { } // sanity check - let mutxo_with_valid_mps = monitored_utxos - .iter() - .filter(|(_i, mutxo)| { - mutxo.is_synced_to(new_block.kernel.header.prev_block_digest) - || mutxo.blockhash_to_membership_proof.is_empty() - }) - .count(); - assert_eq!( - mutxo_with_valid_mps, - valid_membership_proofs_and_own_utxo_count.len(), - "Monitored UTXO count must match number of managed membership proofs" - ); + { + let stream = monitored_utxos.stream_values().await; + pin_mut!(stream); // needed for iteration + + let mutxo_with_valid_mps = stream + .filter(|mutxo| { + futures::future::ready( + mutxo.is_synced_to(new_block.kernel.header.prev_block_digest) + || mutxo.blockhash_to_membership_proof.is_empty(), + ) + }) + .count() + .await; + + assert_eq!( + mutxo_with_valid_mps, + valid_membership_proofs_and_own_utxo_count.len(), + "Monitored UTXO count must match number of managed membership proofs" + ); + } // apply all removal records debug!("Block has {} removal records", removal_records.len()); @@ -493,7 +508,7 @@ impl WalletState { "Transaction has {} inputs", new_block.kernel.body.transaction.kernel.inputs.len() ); - let mut block_tx_input_count = 0; + let mut block_tx_input_count: usize = 0; while let Some(removal_record) = removal_records.pop() { let res = MsMembershipProof::batch_update_from_remove( &mut valid_membership_proofs_and_own_utxo_count @@ -525,13 +540,13 @@ impl WalletState { block_tx_input_count ); - let mut spent_mutxo = monitored_utxos.get(*mutxo_list_index); + let mut spent_mutxo = monitored_utxos.get(*mutxo_list_index).await; spent_mutxo.spent_in_block = Some(( new_block.hash(), new_block.kernel.header.timestamp, new_block.kernel.header.height, )); - monitored_utxos.set(*mutxo_list_index, spent_mutxo); + monitored_utxos.set(*mutxo_list_index, spent_mutxo).await; } } @@ -550,10 +565,15 @@ impl WalletState { changed_mps.dedup(); debug!("Number of mutated membership proofs: {}", changed_mps.len()); - let num_unspent_utxos = monitored_utxos - .iter() - .filter(|(_, m)| m.spent_in_block.is_none()) - .count(); + let num_unspent_utxos = { + let stream = monitored_utxos.stream_values().await; + pin_mut!(stream); // needed for iteration + + stream + .filter(|m| futures::future::ready(m.spent_in_block.is_none())) + .count() + .await + }; debug!("Number of unspent UTXOs: {}", num_unspent_utxos); @@ -561,7 +581,7 @@ impl WalletState { valid_membership_proofs_and_own_utxo_count.iter() { let StrongUtxoKey { utxo_digest, .. 
} = strong_utxo_key; - let mut monitored_utxo = monitored_utxos.get(*own_utxo_index); + let mut monitored_utxo = monitored_utxos.get(*own_utxo_index).await; monitored_utxo.add_membership_proof_for_tip(new_block.hash(), updated_ms_mp.to_owned()); // Sanity check that membership proofs of non-spent transactions are still valid @@ -570,7 +590,7 @@ impl WalletState { || msa_state.verify(utxo_digest, updated_ms_mp) ); - monitored_utxos.set(*own_utxo_index, monitored_utxo); + monitored_utxos.set(*own_utxo_index, monitored_utxo).await; // TODO: What if a newly added transaction replaces a transaction that was in another fork? // How do we ensure that this transaction is not counted twice? @@ -583,8 +603,8 @@ impl WalletState { self.store_utxo_ms_recovery_data(item).await?; } - self.wallet_db.set_sync_label(new_block.hash()); - self.wallet_db.persist(); + self.wallet_db.set_sync_label(new_block.hash()).await; + self.wallet_db.persist().await; // Mark all expected UTXOs that were received in this block as received expected_utxos_in_this_block @@ -599,7 +619,7 @@ impl WalletState { } pub async fn is_synced_to(&self, tip_hash: Digest) -> bool { - let db_sync_digest = self.wallet_db.get_sync_label(); + let db_sync_digest = self.wallet_db.get_sync_label().await; if db_sync_digest != tip_hash { return false; } @@ -608,19 +628,26 @@ impl WalletState { // We assume that the membership proof can only be stored // if it is valid for the given block hash, so there is // no need to test validity here. - let synced = monitored_utxos - .iter() - .all(|(_, m)| m.get_membership_proof_for_block(tip_hash).is_some()); - synced + let stream = monitored_utxos.stream_values().await; + pin_mut!(stream); // needed for iteration + + stream + .all(|m| futures::future::ready(m.get_membership_proof_for_block(tip_hash).is_some())) + .await } - pub fn get_wallet_status_from_lock(&self, tip_digest: Digest) -> WalletStatus { + pub async fn get_wallet_status_from_lock(&self, tip_digest: Digest) -> WalletStatus { let monitored_utxos = self.wallet_db.monitored_utxos(); let mut synced_unspent = vec![]; let mut unsynced_unspent = vec![]; let mut synced_spent = vec![]; let mut unsynced_spent = vec![]; - for (_i, mutxo) in monitored_utxos.iter() { + + let stream = monitored_utxos.stream().await; + pin_mut!(stream); // needed for iteration + + while let Some((_i, mutxo)) = stream.next().await { + // for (_i, mutxo) in monitored_utxos.iter() { let utxo = mutxo.utxo.clone(); let spent = mutxo.spent_in_block.is_some(); if let Some(mp) = mutxo.get_membership_proof_for_block(tip_digest) { @@ -664,7 +691,7 @@ impl WalletState { // TODO: Should return the correct spending keys associated with the UTXOs // We only attempt to generate a transaction using those UTXOs that have up-to-date // membership proofs. - let wallet_status = self.get_wallet_status_from_lock(tip_digest); + let wallet_status = self.get_wallet_status_from_lock(tip_digest).await; // First check that we have enough. Otherwise return an error. 
if wallet_status.synced_unspent_available_amount(timestamp) < requested_amount { @@ -718,7 +745,10 @@ impl WalletState { let monitored_utxos = self.wallet_db.monitored_utxos(); let mut own_coins = vec![]; - for (_i, mutxo) in monitored_utxos.iter() { + let stream = monitored_utxos.stream_values().await; + pin_mut!(stream); // needed for iteration + + while let Some(mutxo) = stream.next().await { if mutxo.spent_in_block.is_some() || mutxo.abandoned_at.is_some() || mutxo.get_latest_membership_proof_entry().is_none() @@ -778,7 +808,8 @@ mod tests { .wallet_state .wallet_db .monitored_utxos() - .len(); + .len() + .await; let mut mutator_set_accumulator = genesis_block.kernel.body.mutator_set_accumulator.clone(); assert!( monitored_utxos_count_init.is_zero(), @@ -825,6 +856,7 @@ mod tests { .wallet_db .monitored_utxos() .len() + .await .is_zero(), "Monitored UTXO list must be empty at height 2" ); @@ -877,6 +909,7 @@ mod tests { .wallet_db .monitored_utxos() .len() + .await .is_one(), "Monitored UTXO list must have length 1 at block 3a" ); @@ -886,6 +919,7 @@ mod tests { .wallet_db .monitored_utxos() .get(0) + .await .abandoned_at .is_none(), "MUTXO may not be marked as abandoned at block 3a" @@ -924,7 +958,7 @@ mod tests { .wallet_db .monitored_utxos() - .get(0) + .get(0).await .abandoned_at .is_none(), "MUTXO may not be marked as abandoned at block 3b, as the abandoned chain is not yet old enough and has not been pruned" @@ -979,6 +1013,7 @@ mod tests { .wallet_db .monitored_utxos() .get(0) + .await .abandoned_at .is_none(), "MUTXO must not be abandoned at height 11" @@ -1016,6 +1051,7 @@ mod tests { .wallet_db .monitored_utxos() .get(0) + .await .abandoned_at .is_none(), "MUTXO must *not* be marked as abandoned at height 12, prior to pruning" @@ -1036,6 +1072,7 @@ mod tests { .wallet_db .monitored_utxos() .get(0) + .await .abandoned_at .unwrap(), "MUTXO must be marked as abandoned at height 12, after pruning" @@ -1057,16 +1094,16 @@ mod tests { // are we synchronized to the genesis block? assert_eq!( - wallet_state.wallet_db.get_sync_label(), + wallet_state.wallet_db.get_sync_label().await, genesis_block.hash() ); // Do we have valid membership proofs for all UTXOs received in the genesis block? 
let monitored_utxos = wallet_state.wallet_db.monitored_utxos(); - let num_monitored_utxos = monitored_utxos.len(); + let num_monitored_utxos = monitored_utxos.len().await; assert!(num_monitored_utxos > 0); for i in 0..num_monitored_utxos { - let monitored_utxo: MonitoredUtxo = monitored_utxos.get(i); + let monitored_utxo: MonitoredUtxo = monitored_utxos.get(i).await; if let Some((digest, _duration, _height)) = monitored_utxo.confirmed_in_block { assert_eq!(digest, genesis_block.hash()); } else { diff --git a/src/peer_loop.rs b/src/peer_loop.rs index 17a47c31..cc35ffdf 100644 --- a/src/peer_loop.rs +++ b/src/peer_loop.rs @@ -1226,7 +1226,7 @@ mod peer_loop_tests { let (peer_address1, instance_id1) = (peer_infos[1].connected_address, peer_infos[1].instance_id); - let (hsd2, sa2) = get_dummy_peer_connection_data_genesis(Network::Alpha, 2); + let (hsd2, sa2) = get_dummy_peer_connection_data_genesis(Network::Alpha, 2).await; let expected_response = vec![ (peer_address0, instance_id0), (peer_address1, instance_id1), @@ -1876,7 +1876,7 @@ mod peer_loop_tests { let mut global_state_mut = state_lock.lock_guard_mut().await; - let (hsd1, peer_address1) = get_dummy_peer_connection_data_genesis(Network::Alpha, 1); + let (hsd1, peer_address1) = get_dummy_peer_connection_data_genesis(Network::Alpha, 1).await; let genesis_block: Block = global_state_mut .chain .archival_state() @@ -2277,7 +2277,7 @@ mod peer_loop_tests { add_block(&mut global_state_mut, block_1.clone()).await?; drop(global_state_mut); - let (hsd_1, sa_1) = get_dummy_peer_connection_data_genesis(network, 1); + let (hsd_1, sa_1) = get_dummy_peer_connection_data_genesis(network, 1).await; let expected_peer_list_resp = vec![ ( peer_infos[0].listen_address().unwrap(), @@ -2366,7 +2366,7 @@ mod peer_loop_tests { Action::Read(PeerMessage::Bye), ]); - let (hsd_1, _sa_1) = get_dummy_peer_connection_data_genesis(Network::Alpha, 1); + let (hsd_1, _sa_1) = get_dummy_peer_connection_data_genesis(Network::Alpha, 1).await; let peer_loop_handler = PeerLoopHandler::new( to_main_tx, state_lock.clone(), @@ -2411,7 +2411,7 @@ mod peer_loop_tests { Action::Read(PeerMessage::Bye), ]); - let (hsd_1, _sa_1) = get_dummy_peer_connection_data_genesis(Network::Alpha, 1); + let (hsd_1, _sa_1) = get_dummy_peer_connection_data_genesis(Network::Alpha, 1).await; let peer_loop_handler = PeerLoopHandler::new( to_main_tx, state_lock.clone(), diff --git a/src/rpc_server.rs b/src/rpc_server.rs index 8ab46d4c..2fa1cf5f 100644 --- a/src/rpc_server.rs +++ b/src/rpc_server.rs @@ -494,6 +494,11 @@ impl RPC for NeptuneRPCServer { let utxo = Utxo::new(address.lock_script(), coins); let now = Timestamp::now(); + // note: for future changes: + // No consensus data should be read within this read-lock. + // Else a write lock must be used instead and held until + // create_transaction() completes, so entire op is atomic. + // See: https://github.com/Neptune-Crypto/neptune-core/issues/134 let state = self.state.lock_guard().await; let block_height = state.chain.light_state().header().height; let receiver_privacy_digest = address.privacy_digest; @@ -534,6 +539,8 @@ impl RPC for NeptuneRPCServer { .await; } + // All cryptographic data must be in relation to a single block + // and a write-lock must therefore be held over GlobalState to ensure this. 
let transaction_result = self .state .lock_guard_mut() diff --git a/src/tests/shared.rs b/src/tests/shared.rs index a03b6170..693cc9da 100644 --- a/src/tests/shared.rs +++ b/src/tests/shared.rs @@ -5,6 +5,8 @@ use crate::models::blockchain::type_scripts::neptune_coins::NeptuneCoins; use crate::models::consensus::timestamp::Timestamp; use crate::models::consensus::ValidityTree; use crate::prelude::twenty_first; +use crate::util_types::mutator_set::commit; +use crate::util_types::mutator_set::get_swbf_indices; use anyhow::Result; use bytes::{Bytes, BytesMut}; @@ -27,7 +29,6 @@ use std::path::Path; use std::path::PathBuf; use std::time::SystemTime; use std::{collections::HashMap, env, net::SocketAddr, pin::Pin, str::FromStr, sync::Arc}; -use tasm_lib::triton_vm::proof::Proof; use tasm_lib::twenty_first::util_types::mmr::mmr_accumulator::MmrAccumulator; use tokio::sync::{broadcast, mpsc}; use tokio_serde::{formats::SymmetricalBincode, Serializer}; @@ -79,9 +80,6 @@ use crate::util_types::mutator_set::chunk_dictionary::pseudorandom_chunk_diction use crate::util_types::mutator_set::ms_membership_proof::pseudorandom_mutator_set_membership_proof; use crate::util_types::mutator_set::ms_membership_proof::MsMembershipProof; use crate::util_types::mutator_set::mutator_set_accumulator::MutatorSetAccumulator; -use crate::util_types::mutator_set::mutator_set_kernel::get_swbf_indices; -use crate::util_types::mutator_set::mutator_set_trait::commit; -use crate::util_types::mutator_set::mutator_set_trait::MutatorSet; use crate::util_types::mutator_set::removal_record::AbsoluteIndexSet; use crate::util_types::mutator_set::removal_record::RemovalRecord; use crate::util_types::test_shared::mutator_set::pseudorandom_mmra; @@ -135,7 +133,7 @@ pub fn get_dummy_version() -> String { "0.1.0".to_string() } -pub fn get_dummy_latest_block( +pub async fn get_dummy_latest_block( input_block: Option, ) -> (Block, LatestBlockInfo, Arc>) { let network = Network::RegTest; @@ -154,10 +152,15 @@ pub fn get_dummy_latest_block( } /// Return a handshake object with a randomly set instance ID -pub fn get_dummy_handshake_data_for_genesis(network: Network) -> HandshakeData { +pub async fn get_dummy_handshake_data_for_genesis(network: Network) -> HandshakeData { HandshakeData { instance_id: rand::random(), - tip_header: get_dummy_latest_block(None).2.lock().unwrap().to_owned(), + tip_header: get_dummy_latest_block(None) + .await + .2 + .lock() + .unwrap() + .to_owned(), listen_port: Some(8080), network, version: get_dummy_version(), @@ -173,11 +176,11 @@ pub fn to_bytes(message: &PeerMessage) -> Result { Ok(buf.freeze()) } -pub fn get_dummy_peer_connection_data_genesis( +pub async fn get_dummy_peer_connection_data_genesis( network: Network, id: u8, ) -> (HandshakeData, SocketAddr) { - let handshake = get_dummy_handshake_data_for_genesis(network); + let handshake = get_dummy_handshake_data_for_genesis(network).await; let socket_address = get_dummy_socket_address(id); (handshake, socket_address) @@ -201,7 +204,7 @@ pub async fn get_mock_global_state( peer_map.insert(peer_address, get_dummy_peer(peer_address)); } let networking_state = NetworkingState::new(peer_map, peer_db, syncing); - let (block, _, _) = get_dummy_latest_block(None); + let (block, _, _) = get_dummy_latest_block(None).await; let light_state: LightState = LightState::from(block.clone()); let blockchain_state = BlockchainState::Archival(BlockchainArchivalState { light_state, @@ -254,7 +257,7 @@ pub async fn get_test_genesis_setup( to_main_tx, _to_main_rx1, state, - 
get_dummy_handshake_data_for_genesis(network), + get_dummy_handshake_data_for_genesis(network).await, )) } @@ -734,7 +737,7 @@ pub fn random_option(thing: T) -> Option { // TODO: Consider moving this to to the appropriate place in global state, // keep fn interface. Can be helper function to `create_transaction`. -pub fn make_mock_transaction_with_generation_key( +pub async fn make_mock_transaction_with_generation_key( input_utxos_mps_keys: Vec<(Utxo, MsMembershipProof, generation_address::SpendingKey)>, receiver_data: Vec, fee: NeptuneCoins, @@ -743,7 +746,7 @@ pub fn make_mock_transaction_with_generation_key( // Generate removal records let mut inputs = vec![]; for (input_utxo, input_mp, _) in input_utxos_mps_keys.iter() { - let removal_record = tip_msa.kernel.drop(Hash::hash(input_utxo), input_mp); + let removal_record = tip_msa.drop(Hash::hash(input_utxo), input_mp); inputs.push(removal_record); } @@ -957,7 +960,7 @@ pub fn make_mock_block( }; ( - Block::new(block_header, block_body, Some(Proof(vec![]))), + Block::new(block_header, block_body, Block::mk_std_block_type(None)), coinbase_utxo, coinbase_output_randomness, ) diff --git a/src/util_types/mod.rs b/src/util_types/mod.rs index d7e70725..23e558a6 100644 --- a/src/util_types/mod.rs +++ b/src/util_types/mod.rs @@ -1,5 +1,4 @@ pub mod mutator_set; -pub mod sync; #[cfg(test)] pub mod test_shared; diff --git a/src/util_types/mutator_set.rs b/src/util_types/mutator_set.rs index ff95c881..edc05bb4 100644 --- a/src/util_types/mutator_set.rs +++ b/src/util_types/mutator_set.rs @@ -1,5 +1,24 @@ +use std::{error::Error, fmt}; + +use itertools::Itertools; +use tasm_lib::{ + twenty_first::{ + shared_math::bfield_codec::BFieldCodec, + util_types::algebraic_hasher::{AlgebraicHasher, Sponge}, + }, + Digest, +}; + +use crate::models::blockchain::shared::Hash; + +use self::{ + addition_record::AdditionRecord, + shared::{BATCH_SIZE, CHUNK_SIZE, NUM_TRIALS, WINDOW_SIZE}, +}; + pub mod active_window; pub mod addition_record; +pub mod archival_mmr; pub mod archival_mutator_set; pub mod boxed_big_array; pub mod chunk; @@ -8,9 +27,613 @@ pub mod mmra_and_membership_proofs; pub mod ms_membership_proof; pub mod msa_and_records; pub mod mutator_set_accumulator; -pub mod mutator_set_kernel; -pub mod mutator_set_trait; pub mod removal_record; pub mod root_and_paths; pub mod rusty_archival_mutator_set; pub mod shared; + +impl Error for MutatorSetError {} + +impl fmt::Display for MutatorSetError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +#[derive(PartialEq, Eq, Debug)] +pub enum MutatorSetError { + RequestedAoclAuthPathOutOfBounds((u64, u64)), + RequestedSwbfAuthPathOutOfBounds((u64, u64)), + MutatorSetIsEmpty, +} + +/// Get the (absolute) indices for removing this item from the mutator set. 
+pub fn get_swbf_indices(
+    item: Digest,
+    sender_randomness: Digest,
+    receiver_preimage: Digest,
+    aocl_leaf_index: u64,
+) -> [u128; NUM_TRIALS as usize] {
+    let batch_index: u128 = aocl_leaf_index as u128 / BATCH_SIZE as u128;
+    let batch_offset: u128 = batch_index * CHUNK_SIZE as u128;
+    let leaf_index_bfes = aocl_leaf_index.encode();
+    let input = [
+        item.encode(),
+        sender_randomness.encode(),
+        receiver_preimage.encode(),
+        leaf_index_bfes,
+    ]
+    .concat();
+
+    let mut sponge = Hash::init();
+    Hash::pad_and_absorb_all(&mut sponge, &input);
+    Hash::sample_indices(&mut sponge, WINDOW_SIZE, NUM_TRIALS as usize)
+        .into_iter()
+        .map(|sample_index| sample_index as u128 + batch_offset)
+        .collect_vec()
+        .try_into()
+        .unwrap()
+}
+
+/// Generates an addition record from an item and explicit randomness.
+/// The addition record is itself a commitment to the item.
+pub fn commit(item: Digest, sender_randomness: Digest, receiver_digest: Digest) -> AdditionRecord {
+    let canonical_commitment =
+        Hash::hash_pair(Hash::hash_pair(item, sender_randomness), receiver_digest);
+
+    AdditionRecord::new(canonical_commitment)
+}
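Taken together, the two helpers above are the heart of the commitment scheme: `commit` binds an item to its sender randomness and receiver digest, and `get_swbf_indices` derives the sliding-window Bloom filter indices that are set when the item is later removed. The index arithmetic deserves a second look: the sponge samples indices inside the active window, and the batch offset of the item's AOCL leaf shifts them to absolute positions. A toy, runnable sketch of just that shift, where the constants and the precomputed `window_samples` are illustrative stand-ins for `shared::BATCH_SIZE`/`shared::CHUNK_SIZE` and the real Tip5-based sampling:

fn toy_swbf_indices(window_samples: &[u32], aocl_leaf_index: u64) -> Vec<u128> {
    // Illustrative stand-ins for the real shared::BATCH_SIZE / CHUNK_SIZE.
    const BATCH_SIZE: u64 = 8;
    const CHUNK_SIZE: u64 = 1 << 12;

    let batch_index = aocl_leaf_index as u128 / BATCH_SIZE as u128;
    let batch_offset = batch_index * CHUNK_SIZE as u128;
    // Window-relative samples become absolute indices by adding the batch
    // offset, exactly as in `get_swbf_indices` above.
    window_samples
        .iter()
        .map(|&s| s as u128 + batch_offset)
        .collect()
}

fn main() {
    // Leaves 0..8 share batch 0 (offset 0); leaf 8 starts batch 1.
    assert_eq!(toy_swbf_indices(&[5, 9], 3), vec![5, 9]);
    assert_eq!(toy_swbf_indices(&[5, 9], 8), vec![4101, 4105]);
}

Because the offset grows monotonically with the AOCL leaf index, a removal record only ever touches the active window plus a bounded number of archived chunks.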
+
+#[cfg(test)]
+mod accumulation_scheme_tests {
+    use accumulation_scheme_tests::ms_membership_proof::MsMembershipProof;
+    use accumulation_scheme_tests::removal_record::RemovalRecord;
+    use rand::prelude::*;
+    use rand::Rng;
+    use tasm_lib::twenty_first::util_types::mmr::mmr_trait::Mmr;
+
+    use crate::util_types::mutator_set::mutator_set_accumulator::MutatorSetAccumulator;
+    use crate::util_types::test_shared::mutator_set::*;
+
+    use super::*;
+
+    #[test]
+    fn get_batch_index_test() {
+        // Verify that the method to get batch index returns sane results
+
+        let mut mutator_set = MutatorSetAccumulator::default();
+        assert_eq!(
+            0,
+            mutator_set.get_batch_index(),
+            "Batch index for empty MS must be zero"
+        );
+
+        for i in 0..BATCH_SIZE {
+            let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
+            let addition_record = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
+            mutator_set.add(&addition_record);
+            assert_eq!(
+                0,
+                mutator_set.get_batch_index(),
+                "Batch index must be 0 after adding {} elements",
+                i
+            );
+        }
+
+        let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
+        let addition_record = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
+        mutator_set.add(&addition_record);
+        assert_eq!(
+            1,
+            mutator_set.get_batch_index(),
+            "Batch index must be one after adding BATCH_SIZE+1 elements"
+        );
+    }
+
+    #[tokio::test]
+    async fn mutator_set_hash_test() {
+        let empty_set = MutatorSetAccumulator::default();
+        let empty_hash = empty_set.hash();
+
+        // Add one element to append-only commitment list
+        let mut set_with_aocl_append = MutatorSetAccumulator::default();
+
+        let (item0, _sender_randomness, _receiver_preimage) = make_item_and_randomnesses();
+
+        set_with_aocl_append.aocl.append(item0);
+        let hash_of_aocl_append = set_with_aocl_append.hash();
+
+        assert_ne!(
+            empty_hash, hash_of_aocl_append,
+            "Appending to AOCL must change MutatorSet commitment"
+        );
+
+        // Manipulate inactive SWBF
+        let mut set_with_swbf_inactive_append = MutatorSetAccumulator::default();
+        set_with_swbf_inactive_append.swbf_inactive.append(item0);
+        let hash_of_one_in_inactive = set_with_swbf_inactive_append.hash();
+        assert_ne!(
+            empty_hash, hash_of_one_in_inactive,
+            "Changing inactive must change MS hash"
+        );
+        assert_ne!(
+            hash_of_aocl_append, hash_of_one_in_inactive,
+            "One in AOCL and one in inactive must hash to different digests"
+        );
+
+        // Manipulate active window
+        let mut active_window_changed = empty_set;
+        active_window_changed.swbf_active.insert(42);
+        assert_ne!(
+            empty_hash,
+            active_window_changed.hash(),
+            "Changing active window must change commitment"
+        );
+
+        // Sanity check bc reasons
+        active_window_changed.swbf_active.remove(42);
+        assert_eq!(
+            empty_hash,
+            active_window_changed.hash(),
+            "Commitment to empty MS must be consistent"
+        );
+    }
+
+    #[test]
+    fn ms_get_indices_test() {
+        // Test that `get_indices` behaves as expected, i.e.
+        // that it always returns something of length `NUM_TRIALS`, and that the
+        // returned values are in the expected range.
+
+        let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
+        let ret: [u128; NUM_TRIALS as usize] =
+            get_swbf_indices(item, sender_randomness, receiver_preimage, 0);
+        assert_eq!(NUM_TRIALS as usize, ret.len());
+        assert!(ret.iter().all(|&x| x < WINDOW_SIZE as u128));
+    }
+
+    #[test]
+    fn ms_get_indices_test_big() {
+        // Test that `get_indices` behaves as expected. I.e. that it returns indices in the correct range,
+        // and always returns something of length `NUM_TRIALS`.
+
+        for _ in 0..1000 {
+            let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
+            let ret: [u128; NUM_TRIALS as usize] =
+                get_swbf_indices(item, sender_randomness, receiver_preimage, 0);
+            assert_eq!(NUM_TRIALS as usize, ret.len());
+            assert!(ret.iter().all(|&x| x < WINDOW_SIZE as u128));
+        }
+
+        for _ in 0..1000 {
+            let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
+            let ret: [u128; NUM_TRIALS as usize] = get_swbf_indices(
+                item,
+                sender_randomness,
+                receiver_preimage,
+                (17 * BATCH_SIZE) as u64,
+            );
+            assert_eq!(NUM_TRIALS as usize, ret.len());
+            assert!(ret
+                .iter()
+                .all(|&x| (x as u32) < WINDOW_SIZE + 17 * CHUNK_SIZE
+                    && (x as u32) >= 17 * CHUNK_SIZE));
+        }
+    }
+
+    #[tokio::test]
+    async fn init_test() {
+        let accumulator = MutatorSetAccumulator::default();
+        let mut rms = empty_rusty_mutator_set().await;
+        let archival = rms.ams_mut();
+
+        // Verify that function to get batch index does not overflow for the empty MS
+        assert_eq!(
+            0,
+            accumulator.get_batch_index(),
+            "Batch index must be zero for empty MS accumulator"
+        );
+        assert_eq!(
+            0,
+            archival.get_batch_index_async().await,
+            "Batch index must be zero for empty archival MS"
+        );
+    }
+
+    #[test]
+    fn verify_future_indices_test() {
+        // Ensure that `verify` does not crash when given a membership proof
+        // that represents a future addition to the AOCL.
+
+        let mut mutator_set = MutatorSetAccumulator::default();
+        let empty_mutator_set = MutatorSetAccumulator::default();
+
+        for _ in 0..2 * BATCH_SIZE + 2 {
+            let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
+
+            let addition_record: AdditionRecord =
+                commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
+            let membership_proof: MsMembershipProof =
+                mutator_set.prove(item, sender_randomness, receiver_preimage);
+            mutator_set.add_helper(&addition_record);
+            assert!(mutator_set.verify(item, &membership_proof));
+
+            // Verify that a future membership proof returns false and does not crash
+            assert!(!empty_mutator_set.verify(item, &membership_proof));
+        }
+    }
+
+    #[test]
+    fn test_membership_proof_update_from_add() {
+        let mut mutator_set = MutatorSetAccumulator::default();
+        let (own_item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
+
+        let addition_record = commit(
+            own_item,
+            sender_randomness,
+            receiver_preimage.hash::<Hash>(),
+        );
+        let mut membership_proof =
+            mutator_set.prove(own_item, sender_randomness, receiver_preimage);
+        mutator_set.add_helper(&addition_record);
+
+        // Update membership proof with add operation. Verify that it has changed, and that it now fails to verify.
+        let (new_item, new_sender_randomness, new_receiver_preimage) = make_item_and_randomnesses();
+        let new_addition_record = commit(
+            new_item,
+            new_sender_randomness,
+            new_receiver_preimage.hash::<Hash>(),
+        );
+        let original_membership_proof = membership_proof.clone();
+        let changed_mp = match membership_proof.update_from_addition(
+            own_item,
+            &mutator_set,
+            &new_addition_record,
+        ) {
+            Ok(changed) => changed,
+            Err(err) => panic!("{}", err),
+        };
+        assert!(
+            changed_mp,
+            "Update must indicate that membership proof has changed"
+        );
+        assert_ne!(
+            original_membership_proof.auth_path_aocl,
+            membership_proof.auth_path_aocl
+        );
+        assert!(
+            mutator_set.verify(own_item, &original_membership_proof),
+            "Original membership proof must verify prior to addition"
+        );
+        assert!(
+            !mutator_set.verify(own_item, &membership_proof),
+            "New membership proof must fail to verify prior to addition"
+        );
+
+        // Insert the new element into the mutator set, then verify that the membership proof works and
+        // that the original membership proof is invalid.
+        mutator_set.add_helper(&new_addition_record);
+        assert!(
+            !mutator_set.verify(own_item, &original_membership_proof),
+            "Original membership proof must fail to verify after addition"
+        );
+        assert!(
+            mutator_set.verify(own_item, &membership_proof),
+            "New membership proof must verify after addition"
+        );
+    }
+
+    #[test]
+    fn membership_proof_updating_from_add_pbt() {
+        let mut rng = thread_rng();
+
+        let mut mutator_set = MutatorSetAccumulator::default();
+
+        let num_additions = rng.gen_range(0..=100i32);
+        println!(
+            "running multiple additions test for {} additions",
+            num_additions
+        );
+
+        let mut membership_proofs_and_items: Vec<(MsMembershipProof, Digest)> = vec![];
+        for i in 0..num_additions {
+            println!("loop iteration {}", i);
+
+            let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
+
+            let addition_record = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
+            let membership_proof = mutator_set.prove(item, sender_randomness, receiver_preimage);
+
+            // Update all membership proofs
+            for (mp, itm) in membership_proofs_and_items.iter_mut() {
+                let original_mp = mp.clone();
+                let changed_res = mp.update_from_addition(*itm, &mutator_set, &addition_record);
+                assert!(changed_res.is_ok());
+
+                // verify that the boolean returned value from the updater method is set correctly
+                assert_eq!(changed_res.unwrap(), original_mp != *mp);
+            }
+
+            // Add the element
+            assert!(!mutator_set.verify(item, &membership_proof));
+            mutator_set.add_helper(&addition_record);
+            assert!(mutator_set.verify(item, &membership_proof));
+            membership_proofs_and_items.push((membership_proof, item));
+
+            // Verify that all membership proofs work
+            assert!(membership_proofs_and_items
+                .clone()
+                .into_iter()
+                .all(|(mp, itm)| mutator_set.verify(itm, &mp)));
+        }
+    }
+
+    #[test]
+    fn test_add_and_prove() {
+        let mut mutator_set = MutatorSetAccumulator::default();
+        let (item0, sender_randomness0, receiver_preimage0) = make_item_and_randomnesses();
+
+        let addition_record = commit(item0, sender_randomness0, receiver_preimage0.hash::<Hash>());
+        let membership_proof = mutator_set.prove(item0, sender_randomness0, receiver_preimage0);
+
+        assert!(!mutator_set.verify(item0, &membership_proof));
+
+        mutator_set.add_helper(&addition_record);
+
+        assert!(mutator_set.verify(item0, &membership_proof));
+
+        // Insert a new item and verify that this still works
+        let (item1, sender_randomness1, receiver_preimage1) = make_item_and_randomnesses();
+        let new_ar = commit(item1, sender_randomness1, receiver_preimage1.hash::<Hash>());
+        let new_mp = mutator_set.prove(item1, sender_randomness1, receiver_preimage1);
+        assert!(!mutator_set.verify(item1, &new_mp));
+
+        mutator_set.add_helper(&new_ar);
+        assert!(mutator_set.verify(item1, &new_mp));
+
+        // Insert ~2*BATCH_SIZE more elements and
+        // verify that it works throughout. The reason we insert this many
+        // is that we want to make sure that the window slides into a new
+        // position.
+        for _ in 0..2 * BATCH_SIZE + 4 {
+            let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
+            let other_ar = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
+            let other_mp = mutator_set.prove(item, sender_randomness, receiver_preimage);
+            assert!(!mutator_set.verify(item, &other_mp));
+
+            mutator_set.add_helper(&other_ar);
+            assert!(mutator_set.verify(item, &other_mp));
+        }
+    }
+
+    #[test]
+    fn batch_update_from_addition_and_removal_test() {
+        let mut mutator_set = MutatorSetAccumulator::default();
+
+        // It's important to test number of additions around the shifting of the window,
+        // i.e. around batch size.
+        let num_additions_list = vec![
+            1,
+            2,
+            BATCH_SIZE - 1,
+            BATCH_SIZE,
+            BATCH_SIZE + 1,
+            6 * BATCH_SIZE - 1,
+            6 * BATCH_SIZE,
+            6 * BATCH_SIZE + 1,
+        ];
+
+        let mut membership_proofs: Vec<MsMembershipProof> = vec![];
+        let mut items = vec![];
+
+        for num_additions in num_additions_list {
+            for _ in 0..num_additions {
+                let (new_item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
+
+                let addition_record = commit(
+                    new_item,
+                    sender_randomness,
+                    receiver_preimage.hash::<Hash>(),
+                );
+                let membership_proof =
+                    mutator_set.prove(new_item, sender_randomness, receiver_preimage);
+
+                // Update *all* membership proofs with newly added item
+                let batch_update_res = MsMembershipProof::batch_update_from_addition(
+                    &mut membership_proofs.iter_mut().collect::<Vec<_>>(),
+                    &items,
+                    &mutator_set,
+                    &addition_record,
+                );
+                assert!(batch_update_res.is_ok());
+
+                mutator_set.add_helper(&addition_record);
+                assert!(mutator_set.verify(new_item, &membership_proof));
+
+                for (mp, &item) in membership_proofs.iter().zip(items.iter()) {
+                    assert!(mutator_set.verify(item, mp));
+                }
+
+                membership_proofs.push(membership_proof);
+                items.push(new_item);
+            }
+
+            // Remove items from MS, and verify correct updating of membership proofs
+            for _ in 0..num_additions {
+                let item = items.pop().unwrap();
+                let mp = membership_proofs.pop().unwrap();
+                assert!(mutator_set.verify(item, &mp));
+
+                // generate removal record
+                let removal_record: RemovalRecord = mutator_set.drop(item, &mp);
+                assert!(removal_record.validate(&mutator_set));
+                assert!(mutator_set.can_remove(&removal_record));
+
+                // update membership proofs
+                let res = MsMembershipProof::batch_update_from_remove(
+                    &mut membership_proofs.iter_mut().collect::<Vec<_>>(),
+                    &removal_record,
+                );
+                assert!(res.is_ok());
+
+                // remove item from set
+                mutator_set.remove_helper(&removal_record);
+                assert!(!mutator_set.verify(item, &mp));
+
+                for (&itm, membp) in items.iter().zip(membership_proofs.iter()) {
+                    assert!(mutator_set.verify(itm, membp));
+                }
+            }
+        }
+    }
+
+    #[test]
+    fn test_multiple_adds() {
+        let mut mutator_set = MutatorSetAccumulator::default();
+
+        let num_additions = 65;
+
+        let mut items_and_membership_proofs: Vec<(Digest, MsMembershipProof)> = vec![];
+
+        for _ in 0..num_additions {
+            let (new_item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
+
+            let addition_record = commit(
+                new_item,
+                sender_randomness,
+                receiver_preimage.hash::<Hash>(),
+            );
+            let membership_proof =
+                mutator_set.prove(new_item, sender_randomness, receiver_preimage);
+
+            // Update *all* membership proofs with newly added item
+            for (updatee_item, mp) in items_and_membership_proofs.iter_mut() {
+                let original_mp = mp.clone();
+                assert!(mutator_set.verify(*updatee_item, mp));
+                let changed_res =
+                    mp.update_from_addition(*updatee_item, &mutator_set, &addition_record);
+                assert!(changed_res.is_ok());
+
+                // verify that
the boolean returned value from the updater method is set correctly + assert_eq!(changed_res.unwrap(), original_mp != *mp); + } + + mutator_set.add_helper(&addition_record); + assert!(mutator_set.verify(new_item, &membership_proof)); + + (0..items_and_membership_proofs.len()).for_each(|j| { + let (old_item, mp) = &items_and_membership_proofs[j]; + assert!(mutator_set.verify(*old_item, mp)) + }); + + items_and_membership_proofs.push((new_item, membership_proof)); + } + + // Verify all membership proofs + (0..items_and_membership_proofs.len()).for_each(|k| { + assert!(mutator_set.verify( + items_and_membership_proofs[k].0, + &items_and_membership_proofs[k].1, + )); + }); + + // Remove items from MS, and verify correct updating of membership proof + (0..num_additions).for_each(|i| { + (i..items_and_membership_proofs.len()).for_each(|k| { + assert!(mutator_set.verify( + items_and_membership_proofs[k].0, + &items_and_membership_proofs[k].1, + )); + }); + let (item, mp) = items_and_membership_proofs[i].clone(); + + assert!(mutator_set.verify(item, &mp)); + + // generate removal record + let removal_record: RemovalRecord = mutator_set.drop(item, &mp); + assert!(removal_record.validate(&mutator_set)); + assert!(mutator_set.can_remove(&removal_record)); + (i..items_and_membership_proofs.len()).for_each(|k| { + assert!(mutator_set.verify( + items_and_membership_proofs[k].0, + &items_and_membership_proofs[k].1, + )); + }); + + // update membership proofs + ((i + 1)..num_additions).for_each(|j| { + assert!(mutator_set.verify( + items_and_membership_proofs[j].0, + &items_and_membership_proofs[j].1 + )); + let update_res = items_and_membership_proofs[j] + .1 + .update_from_remove(&removal_record.clone()); + assert!(update_res.is_ok()); + }); + + // remove item from set + mutator_set.remove_helper(&removal_record); + assert!(!mutator_set.verify(item, &mp)); + + ((i + 1)..items_and_membership_proofs.len()).for_each(|k| { + assert!(mutator_set.verify( + items_and_membership_proofs[k].0, + &items_and_membership_proofs[k].1, + )); + }); + }); + } + + #[test] + fn ms_serialization_test() { + // This test verifies that the mutator set structure can be serialized and deserialized. + // When Rust spawns threads (as it does when it runs tests, and in the Neptune Core client), + // the new threads only get 2MB stack memory initially. This can result in stack overflows + // in the runtime. This test is to verify that that does not happen. + // Cf. 
https://stackoverflow.com/questions/72618777/how-to-deserialize-a-nested-big-array
+        // and https://stackoverflow.com/questions/72621410/how-do-i-use-serde-stacker-in-my-deserialize-implementation
+        let mut mutator_set = MutatorSetAccumulator::default();
+
+        let json_empty = serde_json::to_string(&mutator_set).unwrap();
+        println!("json = \n{}", json_empty);
+        let s_back = serde_json::from_str::<MutatorSetAccumulator>(&json_empty).unwrap();
+        assert!(s_back.aocl.is_empty());
+        assert!(s_back.swbf_inactive.is_empty());
+        assert!(s_back.swbf_active.sbf.is_empty());
+
+        // Add an item, verify correct serialization
+        let (mp, item) = insert_mock_item(&mut mutator_set);
+        let json_one_add = serde_json::to_string(&mutator_set).unwrap();
+        println!("json_one_add = \n{}", json_one_add);
+        let s_back_one_add = serde_json::from_str::<MutatorSetAccumulator>(&json_one_add).unwrap();
+        assert_eq!(1, s_back_one_add.aocl.count_leaves());
+        assert!(s_back_one_add.swbf_inactive.is_empty());
+        assert!(s_back_one_add.swbf_active.sbf.is_empty());
+        assert!(s_back_one_add.verify(item, &mp));
+
+        // Remove an item, verify correct serialization
+        remove_mock_item(&mut mutator_set, item, &mp);
+        let json_one_add_one_remove = serde_json::to_string(&mutator_set).unwrap();
+        println!("json_one_add_one_remove = \n{}", json_one_add_one_remove);
+        let s_back_one_add_one_remove =
+            serde_json::from_str::<MutatorSetAccumulator>(&json_one_add_one_remove).unwrap();
+        assert_eq!(
+            1,
+            s_back_one_add_one_remove.aocl.count_leaves(),
+            "AOCL must still have exactly one leaf"
+        );
+        assert!(
+            s_back_one_add_one_remove.swbf_inactive.is_empty(),
+            "Window should not have moved"
+        );
+        assert!(
+            !s_back_one_add_one_remove.swbf_active.sbf.is_empty(),
+            "Some of the indices in the active window must now be set"
+        );
+        assert!(
+            !s_back_one_add_one_remove.verify(item, &mp),
+            "Membership proof must fail after removal"
+        );
+    }
+}
diff --git a/src/util_types/mutator_set/addition_record.rs b/src/util_types/mutator_set/addition_record.rs
index 65d02933..7a7e0b15 100644
--- a/src/util_types/mutator_set/addition_record.rs
+++ b/src/util_types/mutator_set/addition_record.rs
@@ -33,12 +33,12 @@ pub fn pseudorandom_addition_record(seed: [u8; 32]) -> AdditionRecord {

 #[cfg(test)]
 mod addition_record_tests {
-    use crate::models::blockchain::shared::Hash;
-    use crate::util_types::mutator_set::mutator_set_trait::commit;
     use rand::random;
     use twenty_first::util_types::algebraic_hasher::AlgebraicHasher;

+    use crate::{models::blockchain::shared::Hash, util_types::mutator_set::commit};
+
     use super::*;

     #[test]
diff --git a/src/util_types/mutator_set/archival_mmr.rs b/src/util_types/mutator_set/archival_mmr.rs
new file mode 100644
index 00000000..f72299ce
--- /dev/null
+++ b/src/util_types/mutator_set/archival_mmr.rs
@@ -0,0 +1,1168 @@
+use crate::database::storage::storage_vec::traits::*;
+use crate::prelude::twenty_first;
+
+use tasm_lib::twenty_first::util_types::mmr::shared_advanced::get_authentication_path_node_indices;
+use tasm_lib::twenty_first::util_types::mmr::shared_advanced::get_peak_heights_and_peak_node_indices;
+use tasm_lib::twenty_first::util_types::mmr::shared_advanced::node_index_to_leaf_index;
+use tasm_lib::twenty_first::util_types::mmr::shared_basic::leaf_index_to_mt_index_and_peak_index;
+use tasm_lib::twenty_first::util_types::mmr::shared_basic::right_lineage_length_from_leaf_index;
+use twenty_first::shared_math::digest::Digest;
+use twenty_first::util_types::algebraic_hasher::AlgebraicHasher;
+use twenty_first::util_types::shared::bag_peaks;
+
+use std::marker::PhantomData;
+
+use itertools::Itertools;
+
+use twenty_first::util_types::mmr::{
+    mmr_accumulator::MmrAccumulator, mmr_membership_proof::MmrMembershipProof, mmr_trait::Mmr,
+    shared_advanced, shared_basic,
+};
+
+/// A Merkle Mountain Range is a data structure for storing a list of hashes.
+///
+/// Merkle Mountain Ranges only know about hashes. When values are to be associated with
+/// MMRs, these values must be stored by the caller, or in a wrapper to this data structure.
+pub struct ArchivalMmr<H: AlgebraicHasher, Storage: StorageVec<Digest>> {
+    digests: Storage,
+    _hasher: PhantomData<H>,
+}
+
+impl<H, Storage> ArchivalMmr<H, Storage>
+where
+    H: AlgebraicHasher + Send + Sync,
+    Storage: StorageVec<Digest>,
+{
+    /// Calculate the root for the entire MMR
+    pub async fn bag_peaks(&self) -> Digest {
+        let peaks: Vec<Digest> = self.get_peaks().await;
+        bag_peaks::<H>(&peaks)
+    }
+
+    /// Return the digests of the peaks of the MMR
+    pub async fn get_peaks(&self) -> Vec<Digest> {
+        let peaks_and_heights = self.get_peaks_with_heights_async().await;
+        peaks_and_heights.into_iter().map(|x| x.0).collect()
+    }
+
+    /// Whether the MMR is empty. Note that since indexing starts at
+    /// 1, the `digests` container must always contain at least one
+    /// element: a dummy digest.
+    pub async fn is_empty(&self) -> bool {
+        self.digests.len().await == 1
+    }
+
+    /// Return the number of leaves in the tree
+    pub async fn count_leaves(&self) -> u64 {
+        node_index_to_leaf_index(self.digests.len().await).unwrap()
+    }
+
+    /// Append an element to the archival MMR, return the membership proof of the newly added leaf.
+    pub async fn append(&mut self, new_leaf: Digest) -> MmrMembershipProof<H> {
+        let mut node_index = self.digests.len().await;
+        let leaf_index = node_index_to_leaf_index(node_index).unwrap();
+        let right_lineage_length = right_lineage_length_from_leaf_index(leaf_index);
+        self.digests.push(new_leaf).await;
+
+        let mut returned_auth_path = vec![];
+        let mut acc_hash = new_leaf;
+        for height in 0..right_lineage_length {
+            let left_sibling_hash = self
+                .digests
+                .get(shared_advanced::left_sibling(node_index, height))
+                .await;
+            returned_auth_path.push(left_sibling_hash);
+            acc_hash = H::hash_pair(left_sibling_hash, acc_hash);
+            self.digests.push(acc_hash).await;
+            node_index += 1;
+        }
+
+        MmrMembershipProof {
+            leaf_index,
+            authentication_path: returned_auth_path,
+            _hasher: PhantomData,
+        }
+    }
+
+    /// Mutate an existing leaf.
+    pub async fn mutate_leaf(&mut self, leaf_index: u64, new_leaf: Digest) {
+        // 1. change the leaf value
+        let mut node_index = shared_advanced::leaf_index_to_node_index(leaf_index);
+        self.digests.set(node_index, new_leaf).await;
+        // leaf_index_to_mt_index_and_peak_index
+
+        // While parent exists in MMR, update parent
+        let mut parent_index = shared_advanced::parent(node_index);
+        let mut acc_hash = new_leaf;
+        while parent_index < self.digests.len().await {
+            let (right_lineage_count, height) =
+                shared_advanced::right_lineage_length_and_own_height(node_index);
+            acc_hash = if right_lineage_count != 0 {
+                // node is right child
+                H::hash_pair(
+                    self.digests
+                        .get(shared_advanced::left_sibling(node_index, height))
+                        .await,
+                    acc_hash,
+                )
+            } else {
+                // node is left child
+                H::hash_pair(
+                    acc_hash,
+                    self.digests
+                        .get(shared_advanced::right_sibling(node_index, height))
+                        .await,
+                )
+            };
+            self.digests.set(parent_index, acc_hash).await;
+            node_index = parent_index;
+            parent_index = shared_advanced::parent(parent_index);
+        }
+    }
+
+    /// Modify a bunch of leafs and keep a set of membership proofs in sync. Notice that this
+    /// function is not just the application of `mutate_leaf` multiple times, as it also preserves
+    /// a list of membership proofs.
+    pub async fn batch_mutate_leaf_and_update_mps(
+        &mut self,
+        membership_proofs: &mut [&mut MmrMembershipProof<H>],
+        mutation_data: Vec<(u64, Digest)>,
+    ) -> Vec<usize> {
+        assert!(
+            mutation_data.iter().map(|md| md.0).all_unique(),
+            "Duplicated leaves are not allowed in membership proof updater"
+        );
+
+        for (leaf_index, digest) in mutation_data.iter() {
+            self.mutate_leaf(*leaf_index, *digest).await;
+        }
+
+        let mut modified_mps: Vec<usize> = vec![];
+        for (i, mp) in membership_proofs.iter_mut().enumerate() {
+            let new_mp = self.prove_membership_async(mp.leaf_index).await;
+            if new_mp != **mp {
+                modified_mps.push(i);
+            }
+
+            **mp = new_mp
+        }
+
+        modified_mps
+    }
+
+    pub async fn verify_batch_update(
+        &self,
+        new_peaks: &[Digest],
+        appended_leafs: &[Digest],
+        leaf_mutations: &[(Digest, MmrMembershipProof<H>)],
+    ) -> bool {
+        let accumulator: MmrAccumulator<H> = self.to_accumulator_async().await;
+        accumulator.verify_batch_update(new_peaks, appended_leafs, leaf_mutations)
+    }
+
+    pub async fn to_accumulator_async(&self) -> MmrAccumulator<H> {
+        MmrAccumulator::init(self.get_peaks().await, self.count_leaves().await)
+    }
+}
+
+impl<H: AlgebraicHasher, Storage: StorageVec<Digest>> ArchivalMmr<H, Storage> {
+    /// Create a new archival MMR, or restore one from a database.
+    pub async fn new(pv: Storage) -> Self {
+        let mut ret = Self {
+            digests: pv,
+            _hasher: PhantomData,
+        };
+        ret.fix_dummy_async().await;
+        ret
+    }
+
+    /// Inserts a dummy digest into the `digests` container. Due to
+    /// 1-indexation, this structure must always contain one element
+    /// (even if it is never used). Due to the persistence layer,
+    /// this data structure can be set to the default vector, which
+    /// is the empty vector. This method fixes that.
+    pub async fn fix_dummy_async(&mut self) {
+        if self.digests.len().await == 0 {
+            self.digests.push(Digest::default()).await;
+        }
+    }
+
+    /// Get a leaf from the MMR, will panic if index is out of range
+    pub async fn get_leaf_async(&self, leaf_index: u64) -> Digest {
+        let node_index = shared_advanced::leaf_index_to_node_index(leaf_index);
+        self.digests.get(node_index).await
+    }
+
+    /// Return membership proof
+    pub async fn prove_membership_async(&self, leaf_index: u64) -> MmrMembershipProof<H> {
+        // A proof consists of an authentication path
+        // and a list of peaks
+        let num_leafs = self.count_leaves().await;
+        assert!(
+            leaf_index < num_leafs,
+            "Cannot prove membership of leaf outside of range. Got leaf_index {leaf_index}. Leaf count is {}", self.count_leaves().await
+        );
+
+        let node_index = shared_advanced::leaf_index_to_node_index(leaf_index);
+        let (_, own_index_into_peaks_list) =
+            leaf_index_to_mt_index_and_peak_index(leaf_index, num_leafs);
+        let (_, peak_indices) = get_peak_heights_and_peak_node_indices(num_leafs);
+        let num_nodes = self.digests.len().await;
+        let sibling_indices = get_authentication_path_node_indices(
+            node_index,
+            peak_indices[own_index_into_peaks_list as usize],
+            num_nodes,
+        )
+        .unwrap();
+
+        let authentication_path = self.digests.get_many(&sibling_indices).await;
+
+        MmrMembershipProof::new(leaf_index, authentication_path)
+    }
+
+    /// Return a list of tuples (peaks, height)
+    pub async fn get_peaks_with_heights_async(&self) -> Vec<(Digest, u32)> {
+        if self.is_empty().await {
+            return vec![];
+        }
+
+        // 1. Find top peak
+        // 2. Jump to right sibling (will not be included)
+        // 3. Take left child of sibling, continue until a node in tree is found
+        // 4. Once new node is found, jump to right sibling (will not be included)
+        // 5. Take left child of sibling, continue until a node in tree is found
+        let mut peaks_and_heights: Vec<(Digest, u32)> = vec![];
+        let (mut top_peak, mut top_height) =
+            shared_advanced::leftmost_ancestor(self.digests.len().await - 1);
+        if top_peak > self.digests.len().await - 1 {
+            top_peak = shared_basic::left_child(top_peak, top_height);
+            top_height -= 1;
+        }
+
+        peaks_and_heights.push((self.digests.get(top_peak).await, top_height));
+        let mut height = top_height;
+        let mut candidate = shared_advanced::right_sibling(top_peak, height);
+        'outer: while height > 0 {
+            '_inner: while candidate > self.digests.len().await && height > 0 {
+                candidate = shared_basic::left_child(candidate, height);
+                height -= 1;
+                if candidate < self.digests.len().await {
+                    peaks_and_heights.push((self.digests.get(candidate).await, height));
+                    candidate = shared_advanced::right_sibling(candidate, height);
+                    continue 'outer;
+                }
+            }
+        }
+
+        peaks_and_heights
+    }
+
+    /// Remove the last leaf from the archival MMR
+    pub async fn remove_last_leaf_async(&mut self) -> Option<Digest> {
+        if self.is_empty().await {
+            return None;
+        }
+
+        let node_index = self.digests.len().await - 1;
+        let mut ret = self.digests.pop().await.unwrap();
+        let (_, mut height) = shared_advanced::right_lineage_length_and_own_height(node_index);
+        while height > 0 {
+            ret = self.digests.pop().await.unwrap();
+            height -= 1;
+        }
+
+        Some(ret)
+    }
+}
+
+#[cfg(test)]
+pub(crate) mod mmr_test {
+
+    use super::*;
+
+    use itertools::*;
+    use rand::random;
+    use rand::thread_rng;
+    use test_strategy::proptest;
+
+    use crate::database::storage::storage_schema::traits::*;
+    use crate::database::storage::storage_schema::SimpleRustyStorage;
+    use crate::database::storage::storage_vec::OrdinaryVec;
+    use crate::database::NeptuneLevelDb;
+    use twenty_first::shared_math::b_field_element::BFieldElement;
+    use twenty_first::shared_math::other::*;
+    use twenty_first::shared_math::tip5::Tip5;
+    use twenty_first::util_types::merkle_tree::*;
+    use twenty_first::util_types::merkle_tree_maker::MerkleTreeMaker;
+    use twenty_first::util_types::mmr::mmr_accumulator::MmrAccumulator;
+    use twenty_first::util_types::mmr::shared_advanced::get_peak_heights_and_peak_node_indices;
+
+    type Storage = OrdinaryVec<Digest>;
+
+    pub(crate) mod mock {
+        use super::*;
+
+        /// Return an empty ArchivalMmr for testing purposes.
+        /// Does *not* have a unique ID, so you can't expect multiple of these
+        /// instances to behave independently unless you understand the
+        /// underlying data structure.
+        pub async fn get_empty_ammr<H: AlgebraicHasher>() -> ArchivalMmr<H, Storage> {
+            let pv: Storage = Default::default();
+            ArchivalMmr::new(pv).await
+        }
+
+        pub async fn get_ammr_from_digests<H>(digests: Vec<Digest>) -> ArchivalMmr<H, Storage>
+        where
+            H: AlgebraicHasher,
+        {
+            let mut ammr = get_empty_ammr().await;
+            for digest in digests {
+                ammr.append(digest).await;
+            }
+            ammr
+        }
+    }
+
+    mod test_tree {
+        use super::*;
+        use proptest_arbitrary_interop::arb;
+
+        /// Test helper to deduplicate generation of Merkle trees.
+        #[derive(Debug, Clone, test_strategy::Arbitrary)]
+        pub(crate) struct MerkleTreeToTest {
+            #[strategy(arb())]
+            pub tree: MerkleTree<Tip5>,
+        }
+    }
+
+    impl<H: AlgebraicHasher, Storage: StorageVec<Digest>> ArchivalMmr<H, Storage> {
+        /// Return the number of nodes in all the trees in the MMR
+        async fn count_nodes(&mut self) -> u64 {
+            self.digests.len().await - 1
+        }
+    }
+
+    /// Calculate a Merkle root from a list of digests of arbitrary length.
+    pub fn root_from_arbitrary_number_of_digests<H: AlgebraicHasher>(digests: &[Digest]) -> Digest {
+        let mut trees = vec![];
+        let mut num_processed_digests = 0;
+        for tree_height in indices_of_set_bits(digests.len() as u64) {
+            let num_leaves_in_tree = 1 << tree_height;
+            let leaf_digests =
+                &digests[num_processed_digests..num_processed_digests + num_leaves_in_tree];
+            let tree: MerkleTree<H> = CpuParallel::from_digests(leaf_digests).unwrap();
+            num_processed_digests += num_leaves_in_tree;
+            trees.push(tree);
+        }
+        let roots = trees.iter().map(|t| t.root()).collect_vec();
+        bag_peaks::<H>(&roots)
+    }
+
+    /// A block can contain an empty list of addition or removal records.
+    #[test]
+    fn computing_mmr_root_for_no_leaves_produces_some_digest() {
+        root_from_arbitrary_number_of_digests::<Tip5>(&[]);
+    }
+
+    #[proptest(cases = 30)]
+    fn mmr_root_of_arbitrary_number_of_leaves_is_merkle_root_when_number_of_leaves_is_a_power_of_two(
+        test_tree: test_tree::MerkleTreeToTest,
+    ) {
+        let root = root_from_arbitrary_number_of_digests::<Tip5>(test_tree.tree.leaves());
+        assert_eq!(test_tree.tree.root(), root);
+    }
+
+    #[tokio::test]
+    async fn empty_mmr_behavior_test() {
+        type H = blake3::Hasher;
+
+        let mut archival_mmr: ArchivalMmr<H, Storage> = mock::get_empty_ammr().await;
+        let mut accumulator_mmr: MmrAccumulator<H> = MmrAccumulator::<H>::new(vec![]);
+
+        assert_eq!(0, archival_mmr.count_leaves().await);
+        assert_eq!(0, accumulator_mmr.count_leaves());
+        assert_eq!(archival_mmr.get_peaks().await, accumulator_mmr.get_peaks());
+        assert_eq!(Vec::<Digest>::new(), accumulator_mmr.get_peaks());
+        assert_eq!(archival_mmr.bag_peaks().await, accumulator_mmr.bag_peaks());
+        assert_eq!(
+            archival_mmr.bag_peaks().await,
+            root_from_arbitrary_number_of_digests::<H>(&[]),
+            "Bagged peaks for empty MMR must agree with MT root finder"
+        );
+        assert_eq!(0, archival_mmr.count_nodes().await);
+        assert!(accumulator_mmr.is_empty());
+        assert!(archival_mmr.is_empty().await);
+
+        // Test behavior of appending to an empty MMR
+        let new_leaf = random();
+
+        let mut archival_mmr_appended = mock::get_empty_ammr().await;
+        {
+            let archival_membership_proof = archival_mmr_appended.append(new_leaf).await;
+
+            // Verify that the MMR update can be validated
+            assert!(
+                archival_mmr
+                    .verify_batch_update(&archival_mmr_appended.get_peaks().await, &[new_leaf], &[])
+                    .await
+            );
+
+            // Verify that failing MMR update for empty MMR fails gracefully
+            assert!(
+                !archival_mmr
+                    .verify_batch_update(
+                        &archival_mmr_appended.get_peaks().await,
+                        &[],
+                        &[(new_leaf, archival_membership_proof)]
+                    )
+                    .await
+            );
+        }
+
+        // Make the append and verify that the new peaks match the one from the proofs
+        let archival_membership_proof = archival_mmr.append(new_leaf).await;
+        let accumulator_membership_proof = accumulator_mmr.append(new_leaf);
+        assert_eq!(
+            archival_mmr.get_peaks().await,
+            archival_mmr_appended.get_peaks().await
+        );
+        assert_eq!(
+            accumulator_mmr.get_peaks(),
+            archival_mmr_appended.get_peaks().await
+        );
+
+        // Verify that the appended value matches the one stored in the archival MMR
+        assert_eq!(new_leaf, archival_mmr.get_leaf_async(0).await);
+
+        // Verify that the membership proofs for the inserted leafs are valid and that they agree
+        assert_eq!(
+            archival_membership_proof, accumulator_membership_proof,
+            "accumulator and archival membership proofs must agree"
+        );
+        assert!(
+            archival_membership_proof
+                .verify(
+                    &archival_mmr.get_peaks().await,
+                    new_leaf,
+                    archival_mmr.count_leaves().await
+                )
+                .0,
+            "membership proof from archival MMR must validate"
+        );
+    }
+
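`root_from_arbitrary_number_of_digests` above leans on the set-bit decomposition of the leaf count: n digests split into perfect binary trees whose sizes are the powers of two in n's binary expansion, consumed largest-first by the slicing loop. The local `indices_of_set_bits` below is an illustrative stand-in for the helper this test module imports from twenty_first's `shared_math::other`, assuming it yields set-bit positions from most to least significant:

fn indices_of_set_bits(n: u64) -> Vec<u64> {
    // Set-bit positions of n, highest first, matching the left-to-right
    // consumption of the digest slice above.
    (0..64u64).rev().filter(|&i| n & (1u64 << i) != 0).collect()
}

fn main() {
    // 13 = 0b1101 leaves decompose into perfect trees of 2^3, 2^2 and 2^0 leaves.
    assert_eq!(indices_of_set_bits(13), vec![3, 2, 0]);
    // The tree sizes sum back to the leaf count.
    let total: u64 = indices_of_set_bits(13).iter().map(|h| 1u64 << h).sum();
    assert_eq!(total, 13);
}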
+    #[tokio::test]
+    async fn verify_against_correct_peak_test() {
+        type H = blake3::Hasher;
+
+        // This test addresses a bug that was discovered late in the development process
+        // where it was possible to fake a verification proof by providing a valid leaf
+        // and authentication path but lying about the data index. This error occurred
+        // because the derived hash was compared against all of the peaks to find a match
+        // and it wasn't verified that the accumulated hash matched the *correct* peak.
+        // This error was fixed and this test fails without that fix.
+        let leaf_hashes: Vec<Digest> = random_elements(3);
+
+        // let archival_mmr = ArchivalMmr::<H, Storage>::new(leaf_hashes.clone());
+        let archival_mmr = mock::get_ammr_from_digests::<H>(leaf_hashes.clone()).await;
+        let peaks = archival_mmr.get_peaks().await;
+        let mut membership_proof = archival_mmr.prove_membership_async(0).await;
+
+        // Verify that the accumulated hash in the verifier is compared against the **correct** hash,
+        // not just **any** hash in the peaks list.
+        assert!(membership_proof.verify(&peaks, leaf_hashes[0], 3,).0);
+        membership_proof.leaf_index = 2;
+        assert!(!membership_proof.verify(&peaks, leaf_hashes[0], 3,).0);
+        membership_proof.leaf_index = 0;
+
+        // verify the same behavior in the accumulator MMR
+        let accumulator_mmr = MmrAccumulator::<H>::new(leaf_hashes.clone());
+        assert!(
+            membership_proof
+                .verify(
+                    &accumulator_mmr.get_peaks(),
+                    leaf_hashes[0],
+                    accumulator_mmr.count_leaves()
+                )
+                .0
+        );
+        membership_proof.leaf_index = 2;
+        assert!(
+            !membership_proof
+                .verify(
+                    &accumulator_mmr.get_peaks(),
+                    leaf_hashes[0],
+                    accumulator_mmr.count_leaves()
+                )
+                .0
+        );
+    }
+
+    #[tokio::test]
+    async fn mutate_leaf_archival_test() {
+        type H = Tip5;
+
+        // Create ArchivalMmr
+
+        let leaf_count = 3;
+        let leaf_hashes: Vec<Digest> = random_elements(leaf_count);
+        let mut archival_mmr = mock::get_ammr_from_digests::<H>(leaf_hashes.clone()).await;
+
+        let leaf_index: u64 = 2;
+        let old_peaks = archival_mmr.get_peaks().await;
+        let mp1 = archival_mmr.prove_membership_async(leaf_index).await;
+
+        // Verify single leaf
+
+        let (mp1_verifies, _acc_hash_1) = mp1.verify(
+            &old_peaks,
+            leaf_hashes[leaf_index as usize],
+            leaf_count as u64,
+        );
+        assert!(mp1_verifies);
+
+        // Create copy of ArchivalMmr, recreate membership proof
+
+        let mut other_archival_mmr: ArchivalMmr<H, Storage> =
+            mock::get_ammr_from_digests::<H>(leaf_hashes.clone()).await;
+
+        let mp2 = other_archival_mmr.prove_membership_async(leaf_index).await;
+
+        // Mutate leaf + mutate leaf raw, assert that they're equivalent
+
+        let mutated_leaf = H::hash(&BFieldElement::new(10000));
+        other_archival_mmr
+            .mutate_leaf(leaf_index, mutated_leaf)
+            .await;
+
+        let new_peaks_one = other_archival_mmr.get_peaks().await;
+        archival_mmr.mutate_leaf(leaf_index, mutated_leaf).await;
+
+        let new_peaks_two = archival_mmr.get_peaks().await;
+        assert_eq!(
+            new_peaks_two, new_peaks_one,
+            "peaks for two update leaf method calls must agree"
+        );
+
+        // Verify that peaks have changed as expected
+
+        let expected_num_peaks = 2;
+        assert_ne!(old_peaks[1], new_peaks_two[1]);
+        assert_eq!(old_peaks[0], new_peaks_two[0]);
+        assert_eq!(expected_num_peaks, new_peaks_two.len());
+        assert_eq!(expected_num_peaks, old_peaks.len());
+
+        let (mp2_verifies_non_mutated_leaf, _acc_hash_3) = mp2.verify(
+            &new_peaks_two,
+            leaf_hashes[leaf_index as usize],
+            leaf_count as u64,
+        );
+        assert!(!mp2_verifies_non_mutated_leaf);
+
+        let (mp2_verifies_mutated_leaf, _acc_hash_4) =
+            mp2.verify(&new_peaks_two, mutated_leaf, leaf_count as u64);
as u64); + assert!(mp2_verifies_mutated_leaf); + + // Create a new archival MMR with the same leaf hashes as in the + // modified MMR, and verify that the two MMRs are equivalent + + let archival_mmr_new = + mock::get_ammr_from_digests::<H>(leaf_hashes).await; + assert_eq!( + archival_mmr.digests.len().await, + archival_mmr_new.digests.len().await + ); + + for i in 0..leaf_count { + assert_eq!( + archival_mmr.digests.get(i as u64).await, + archival_mmr_new.digests.get(i as u64).await + ); + } + } + + async fn bag_peaks_gen<H: AlgebraicHasher>() { + // Verify that the archival and the accumulator MMR produce the same root + let leaf_hashes_blake3: Vec<Digest> = random_elements(3); + let archival_mmr_small = + mock::get_ammr_from_digests::<H>(leaf_hashes_blake3.clone()).await; + let accumulator_mmr_small = MmrAccumulator::<H>::new(leaf_hashes_blake3); + assert_eq!( + archival_mmr_small.bag_peaks().await, + accumulator_mmr_small.bag_peaks() + ); + assert_eq!( + archival_mmr_small.bag_peaks().await, + bag_peaks::<H>(&accumulator_mmr_small.get_peaks()) + ); + assert!(!accumulator_mmr_small + .get_peaks() + .iter() + .any(|peak| *peak == accumulator_mmr_small.bag_peaks())); + } + + #[tokio::test] + async fn bag_peaks_blake3_test() { + bag_peaks_gen::<blake3::Hasher>().await; + bag_peaks_gen::<Tip5>().await; + } + + #[tokio::test] + async fn compare_batch_and_individual_leaf_mutation() { + type H = Tip5; + use rand::seq::SliceRandom; + + let mut rng = thread_rng(); + for size in 0..25 { + let init_digests = random_elements(size); + let mut archival_batch_mut = + mock::get_ammr_from_digests::<H>(init_digests.clone()).await; + let mut archival_individual_mut = mock::get_ammr_from_digests::<H>(init_digests).await; + + for max_mutation_count in 0..size { + let all_indices = (0..size as u64).collect_vec(); + let mutated_indices = (0..max_mutation_count) + .map(|_| *all_indices.choose(&mut rng).unwrap()) + .collect_vec(); + let mutated_indices = mutated_indices.into_iter().unique().collect_vec(); + let new_leafs = random_elements(max_mutation_count); + let mutation_data = mutated_indices + .clone() + .into_iter() + .zip(new_leafs.into_iter()) + .collect_vec(); + + archival_batch_mut + .batch_mutate_leaf_and_update_mps(&mut [], mutation_data.clone()) + .await; + + for (index, new_leaf) in mutation_data { + archival_individual_mut.mutate_leaf(index, new_leaf).await; + } + + assert_eq!( + archival_batch_mut.get_peaks().await, + archival_individual_mut.get_peaks().await + ); + } + } + } + + #[tokio::test] + async fn accumulator_mmr_mutate_leaf_test() { + type H = blake3::Hasher; + + // Verify that updating leafs in the archival and in the accumulator MMR results in the same peaks, + // and verify that updating all leafs in an MMR results in the expected MMR + for size in 1..150 { + let new_leaf: Digest = random(); + let leaf_hashes_blake3: Vec<Digest> = random_elements(size); + + let mut acc = MmrAccumulator::<H>::new(leaf_hashes_blake3.clone()); + let mut archival = + mock::get_ammr_from_digests::<H>(leaf_hashes_blake3.clone()).await; + let archival_end_state = + mock::get_ammr_from_digests::<H>(vec![new_leaf; size]).await; + for i in 0..size { + let i = i as u64; + let mp = archival.prove_membership_async(i).await; + assert_eq!(i, mp.leaf_index); + acc.mutate_leaf(&mp, new_leaf); + archival.mutate_leaf(i, new_leaf).await; + let new_archival_peaks = archival.get_peaks().await; + assert_eq!(new_archival_peaks, acc.get_peaks()); + } + + assert_eq!(archival_end_state.get_peaks().await, acc.get_peaks()); + } + } + + #[tokio::test] + async fn
mmr_prove_verify_leaf_mutation_test() { + type H = blake3::Hasher; + + for size in 1..150 { + let new_leaf: Digest = random(); + let bad_leaf: Digest = random(); + let leaf_hashes_blake3: Vec<Digest> = random_elements(size); + let mut acc = MmrAccumulator::<H>::new(leaf_hashes_blake3.clone()); + let mut archival = + mock::get_ammr_from_digests::<H>(leaf_hashes_blake3.clone()).await; + let archival_end_state = + mock::get_ammr_from_digests::<H>(vec![new_leaf; size]).await; + for i in 0..size { + let i = i as u64; + let peaks_before_update = archival.get_peaks().await; + let mp = archival.prove_membership_async(i).await; + assert_eq!(archival.get_peaks().await, peaks_before_update); + + // Verify the update operation using the batch verifier + archival.mutate_leaf(i, new_leaf).await; + assert!( + acc.verify_batch_update( + &archival.get_peaks().await, + &[], + &[(new_leaf, mp.clone())] + ), + "Valid batch update parameters must succeed" + ); + assert!( + !acc.verify_batch_update( + &archival.get_peaks().await, + &[], + &[(bad_leaf, mp.clone())] + ), + "Invalid batch update parameters must fail" + ); + + acc.mutate_leaf(&mp, new_leaf); + let new_archival_peaks = archival.get_peaks().await; + assert_eq!(new_archival_peaks, acc.get_peaks()); + assert_eq!(size as u64, archival.count_leaves().await); + assert_eq!(size as u64, acc.count_leaves()); + } + assert_eq!(archival_end_state.get_peaks().await, acc.get_peaks()); + } + } + + #[tokio::test] + async fn mmr_append_test() { + type H = blake3::Hasher; + + // Verify that building an MMR iteratively or in *one* function call results in the same MMR + for size in 1..260 { + let leaf_hashes_blake3: Vec<Digest> = random_elements(size); + let mut archival_iterative = + mock::get_ammr_from_digests::<H>(vec![]).await; + let archival_batch = + mock::get_ammr_from_digests::<H>(leaf_hashes_blake3.clone()).await; + let mut accumulator_iterative = MmrAccumulator::<H>::new(vec![]); + let accumulator_batch = MmrAccumulator::<H>::new(leaf_hashes_blake3.clone()); + for (leaf_index, leaf_hash) in leaf_hashes_blake3.clone().into_iter().enumerate() { + let archival_membership_proof: MmrMembershipProof<H> = + archival_iterative.append(leaf_hash).await; + let accumulator_membership_proof = accumulator_iterative.append(leaf_hash); + + // Verify the membership proofs returned from the append operation + assert_eq!( + accumulator_membership_proof, archival_membership_proof, + "membership proofs from append operation must agree" + ); + assert!( + archival_membership_proof + .verify( + &archival_iterative.get_peaks().await, + leaf_hash, + archival_iterative.count_leaves().await + ) + .0 + ); + + // Verify that the membership proofs are the same as those generated directly from the archival MMR + let archival_membership_proof_direct = archival_iterative + .prove_membership_async(leaf_index as u64) + .await; + assert_eq!(archival_membership_proof_direct, archival_membership_proof); + } + + // Verify that the MMRs built iteratively from `append` and in *one* batch are the same + assert_eq!( + accumulator_batch.get_peaks(), + accumulator_iterative.get_peaks() + ); + assert_eq!( + accumulator_batch.count_leaves(), + accumulator_iterative.count_leaves() + ); + assert_eq!(size as u64, accumulator_iterative.count_leaves()); + assert_eq!( + archival_iterative.get_peaks().await, + accumulator_iterative.get_peaks() + ); + + // Run a batch-append verification on the entire mutation of the MMR and verify that it succeeds + let empty_accumulator = MmrAccumulator::<H>::new(vec![]); +
assert!(empty_accumulator.verify_batch_update( + &archival_batch.get_peaks().await, + &leaf_hashes_blake3, + &[], + )); + } + } + + #[tokio::test] + async fn one_input_mmr_test() { + type H = Tip5; + + let input_hash = H::hash(&BFieldElement::new(14)); + let new_input_hash = H::hash(&BFieldElement::new(201)); + let mut mmr = + mock::get_ammr_from_digests::<H>(vec![input_hash]).await; + let original_mmr = + mock::get_ammr_from_digests::<H>(vec![input_hash]).await; + let mmr_after_append = + mock::get_ammr_from_digests::<H>(vec![input_hash, new_input_hash]).await; + assert_eq!(1, mmr.count_leaves().await); + assert_eq!(1, mmr.count_nodes().await); + + let original_peaks_and_heights: Vec<(Digest, u32)> = + mmr.get_peaks_with_heights_async().await; + assert_eq!(1, original_peaks_and_heights.len()); + assert_eq!(0, original_peaks_and_heights[0].1); + + { + let leaf_index = 0; + let peaks = mmr.get_peaks().await; + let membership_proof = mmr.prove_membership_async(leaf_index).await; + let valid_res = membership_proof.verify(&peaks, input_hash, 1); + assert!(valid_res.0); + assert!(valid_res.1.is_some()); + } + + mmr.append(new_input_hash).await; + assert_eq!(2, mmr.count_leaves().await); + assert_eq!(3, mmr.count_nodes().await); + + let new_peaks_and_heights = mmr.get_peaks_with_heights_async().await; + assert_eq!(1, new_peaks_and_heights.len()); + assert_eq!(1, new_peaks_and_heights[0].1); + + let new_peaks: Vec<Digest> = new_peaks_and_heights.iter().map(|x| x.0).collect(); + assert!( + original_mmr + .verify_batch_update(&new_peaks, &[new_input_hash], &[]) + .await, + "verify batch update must succeed for a single append" + ); + + // let mmr_after_append = mmr.clone(); + let new_leaf: Digest = H::hash(&BFieldElement::new(987223)); + + // When verifying the batch update with two consecutive leaf mutations, we must get the + // membership proofs prior to all mutations. This is because the `verify_batch_update` method + // updates the membership proofs internally to account for the mutations.
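+ // A minimal sketch of the required ordering (illustrative only; the names
+ // below are this test's own variables):
+ //
+ //     let mp = mmr_after_append.prove_membership_async(0).await; // BEFORE mutating
+ //     mmr.mutate_leaf(0, new_leaf).await;
+ //     mmr_after_append
+ //         .verify_batch_update(&mmr.get_peaks().await, &[], &[(new_leaf, mp)])
+ //         .await;
+ //
+ // Proofs fetched only after the mutations would already authenticate against
+ // the new peaks, so the verifier's internal proof updates would derive the
+ // wrong roots and the batch update would not verify.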
+ let leaf_mutations: Vec<(Digest, MmrMembershipProof<H>)> = + futures::future::join_all((0..2).map(|i| mmr_after_append.prove_membership_async(i))) + .await + .into_iter() + .map(|p| (new_leaf, p)) + .collect(); + + for leaf_index in [0u64, 1] { + mmr.mutate_leaf(leaf_index, new_leaf).await; + assert_eq!( + new_leaf, + mmr.get_leaf_async(leaf_index).await, + "fetched leaf must match what we put in" + ); + } + + assert!( + mmr_after_append + .verify_batch_update(&mmr.get_peaks().await, &[], &leaf_mutations) + .await, + "The batch update of two leaf mutations must verify" + ); + } + + #[tokio::test] + async fn two_input_mmr_test() { + type H = Tip5; + + let num_leaves: u64 = 3; + let input_digests: Vec<Digest> = random_elements(num_leaves as usize); + + let mut mmr = + mock::get_ammr_from_digests::<H>(input_digests.clone()).await; + assert_eq!(num_leaves, mmr.count_leaves().await); + assert_eq!(1 + num_leaves, mmr.count_nodes().await); + + let original_peaks_and_heights: Vec<(Digest, u32)> = + mmr.get_peaks_with_heights_async().await; + let expected_peaks = 2; + assert_eq!(expected_peaks, original_peaks_and_heights.len()); + + { + let leaf_index = 0; + let input_digest = input_digests[leaf_index]; + let peaks = mmr.get_peaks().await; + let mut membership_proof = mmr.prove_membership_async(leaf_index as u64).await; + + let (mp_verifies_1, acc_hash_1) = + membership_proof.verify(&peaks, input_digest, num_leaves); + assert!(mp_verifies_1); + assert!(acc_hash_1.is_some()); + + // Negative test for membership verification + membership_proof.leaf_index += 1; + + let (mp_verifies_2, acc_hash_2) = + membership_proof.verify(&peaks, input_digest, num_leaves); + assert!(!mp_verifies_2); + assert!(acc_hash_2.is_none()); + } + + let new_leaf_hash: Digest = H::hash(&BFieldElement::new(201)); + mmr.append(new_leaf_hash).await; + + let expected_num_leaves = 1 + num_leaves; + assert_eq!(expected_num_leaves, mmr.count_leaves().await); + + let expected_node_count = 3 + expected_num_leaves; + assert_eq!(expected_node_count, mmr.count_nodes().await); + + for leaf_index in 0..num_leaves { + let new_leaf: Digest = H::hash(&BFieldElement::new(987223)); + mmr.mutate_leaf(leaf_index, new_leaf).await; + assert_eq!(new_leaf, mmr.get_leaf_async(leaf_index).await); + } + } + + #[tokio::test] + async fn variable_size_tip5_mmr_test() { + type H = Tip5; + + let leaf_counts: Vec<u64> = (1..34).collect(); + let node_counts: Vec<u64> = vec![ + 1, 3, 4, 7, 8, 10, 11, 15, 16, 18, 19, 22, 23, 25, 26, 31, 32, 34, 35, 38, 39, 41, 42, + 46, 47, 49, 50, 53, 54, 56, 57, 63, 64, + ]; + let peak_counts: Vec<u64> = vec![ + 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, + 4, 5, 1, 2, + ]; + + for (leaf_count, node_count, peak_count) in izip!(leaf_counts, node_counts, peak_counts) { + let input_hashes: Vec<Digest> = random_elements(leaf_count as usize); + let mut mmr = + mock::get_ammr_from_digests::<H>(input_hashes.clone()).await; + + assert_eq!(leaf_count, mmr.count_leaves().await); + assert_eq!(node_count, mmr.count_nodes().await); + + let original_peaks_and_heights = mmr.get_peaks_with_heights_async().await; + let peak_heights_1: Vec<u32> = original_peaks_and_heights.iter().map(|x| x.1).collect(); + + let (peak_heights_2, _) = get_peak_heights_and_peak_node_indices(leaf_count); + assert_eq!(peak_heights_1, peak_heights_2); + + let actual_peak_count = original_peaks_and_heights.len() as u64; + assert_eq!(peak_count, actual_peak_count); + + // Verify that the MMR root from an arbitrary number of digests and the MMR bagged peaks agree +
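+ // ("Bagging" folds the ordered list of peaks into a single digest, which is
+ // why an MMR commitment can be compared against the root of a Merkle tree
+ // built over the same leaves; the exact fold order and padding are whatever
+ // `bag_peaks` and `root_from_arbitrary_number_of_digests` define.)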
let mmra_root = mmr.bag_peaks().await; + let mt_root = root_from_arbitrary_number_of_digests::<H>(&input_hashes); + + assert_eq!( + mmra_root, mt_root, + "MMRA bagged peaks and MT root must agree" + ); + + // Get an authentication path for **all** values in the MMR, + // and verify that each is valid + for index in 0..leaf_count { + let peaks = mmr.get_peaks().await; + let membership_proof = mmr.prove_membership_async(index).await; + let valid_res = + membership_proof.verify(&peaks, input_hashes[index as usize], leaf_count); + + assert!(valid_res.0); + assert!(valid_res.1.is_some()); + } + + // Make a new MMR where we append a value, and verify the membership proof returned by `append` + let new_leaf_hash = H::hash(&BFieldElement::new(201)); + let original_peaks = mmr.get_peaks().await; + let mp = mmr.append(new_leaf_hash).await; + assert!( + mp.verify(&mmr.get_peaks().await, new_leaf_hash, leaf_count + 1) + .0, + "Returned membership proof from append must verify" + ); + assert_ne!( + original_peaks, + mmr.get_peaks().await, + "peaks must change when appending" + ); + } + } + + #[tokio::test] + async fn remove_last_leaf_test() { + type H = blake3::Hasher; + + let input_digests: Vec<Digest> = random_elements(12); + let mut mmr = + mock::get_ammr_from_digests::<H>(input_digests.clone()).await; + assert_eq!(22, mmr.count_nodes().await); + assert_eq!(Some(input_digests[11]), mmr.remove_last_leaf_async().await); + assert_eq!(19, mmr.count_nodes().await); + assert_eq!(Some(input_digests[10]), mmr.remove_last_leaf_async().await); + assert_eq!(18, mmr.count_nodes().await); + assert_eq!(Some(input_digests[9]), mmr.remove_last_leaf_async().await); + assert_eq!(16, mmr.count_nodes().await); + assert_eq!(Some(input_digests[8]), mmr.remove_last_leaf_async().await); + assert_eq!(15, mmr.count_nodes().await); + assert_eq!(Some(input_digests[7]), mmr.remove_last_leaf_async().await); + assert_eq!(11, mmr.count_nodes().await); + assert_eq!(Some(input_digests[6]), mmr.remove_last_leaf_async().await); + assert_eq!(10, mmr.count_nodes().await); + assert_eq!(Some(input_digests[5]), mmr.remove_last_leaf_async().await); + assert_eq!(8, mmr.count_nodes().await); + assert_eq!(Some(input_digests[4]), mmr.remove_last_leaf_async().await); + assert_eq!(7, mmr.count_nodes().await); + assert_eq!(Some(input_digests[3]), mmr.remove_last_leaf_async().await); + assert_eq!(4, mmr.count_nodes().await); + assert_eq!(Some(input_digests[2]), mmr.remove_last_leaf_async().await); + assert_eq!(3, mmr.count_nodes().await); + assert_eq!(Some(input_digests[1]), mmr.remove_last_leaf_async().await); + assert_eq!(1, mmr.count_nodes().await); + assert_eq!(Some(input_digests[0]), mmr.remove_last_leaf_async().await); + assert_eq!(0, mmr.count_nodes().await); + assert!(mmr.is_empty().await); + assert!(mmr.remove_last_leaf_async().await.is_none()); + } + + #[tokio::test] + async fn remove_last_leaf_pbt() { + type H = blake3::Hasher; + + let small_size: usize = 100; + let big_size: usize = 350; + let input_digests_big: Vec<Digest> = random_elements(big_size); + let input_digests_small: Vec<Digest> = input_digests_big[0..small_size].to_vec(); + + let mut mmr_small = + mock::get_ammr_from_digests::<H>(input_digests_small).await; + let mut mmr_big = + mock::get_ammr_from_digests::<H>(input_digests_big).await; + + for _ in 0..(big_size - small_size) { + mmr_big.remove_last_leaf_async().await; + } + + assert_eq!(mmr_big.get_peaks().await, mmr_small.get_peaks().await); + assert_eq!(mmr_big.bag_peaks().await, mmr_small.bag_peaks().await); +
assert_eq!(mmr_big.count_leaves().await, mmr_small.count_leaves().await); + assert_eq!(mmr_big.count_nodes().await, mmr_small.count_nodes().await); + } + + #[tokio::test] + async fn variable_size_blake3_mmr_test() { + type H = blake3::Hasher; + + let node_counts: Vec<u64> = vec![ + 1, 3, 4, 7, 8, 10, 11, 15, 16, 18, 19, 22, 23, 25, 26, 31, 32, 34, 35, 38, 39, 41, 42, + 46, 47, 49, 50, 53, 54, 56, 57, 63, 64, + ]; + let peak_counts: Vec<u64> = vec![ + 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, + 4, 5, 1, 2, + ]; + let leaf_counts: Vec<usize> = (1..34).collect(); + for (leaf_count, node_count, peak_count) in izip!(leaf_counts, node_counts, peak_counts) { + let size = leaf_count as u64; + let input_digests: Vec<Digest> = random_elements(leaf_count); + let mut mmr = + mock::get_ammr_from_digests::<H>(input_digests.clone()).await; + let mmr_original = + mock::get_ammr_from_digests::<H>(input_digests.clone()).await; + assert_eq!(size, mmr.count_leaves().await); + assert_eq!(node_count, mmr.count_nodes().await); + let original_peaks_and_heights: Vec<(Digest, u32)> = + mmr.get_peaks_with_heights_async().await; + let peak_heights_1: Vec<u32> = original_peaks_and_heights.iter().map(|x| x.1).collect(); + let (peak_heights_2, _) = get_peak_heights_and_peak_node_indices(size); + assert_eq!(peak_heights_1, peak_heights_2); + assert_eq!(peak_count, original_peaks_and_heights.len() as u64); + + // Verify that the MMR root from an arbitrary number of digests and the MMR bagged peaks agree + let mmra_root = mmr.bag_peaks().await; + let mt_root = root_from_arbitrary_number_of_digests::<H>(&input_digests); + assert_eq!( + mmra_root, mt_root, + "MMRA bagged peaks and MT root must agree" + ); + + // Get an authentication path for **all** values in the MMR, + // and verify that each is valid + for leaf_index in 0..size { + let peaks = mmr.get_peaks().await; + let mut membership_proof = mmr.prove_membership_async(leaf_index).await; + let valid_res = + membership_proof.verify(&peaks, input_digests[leaf_index as usize], size); + assert!(valid_res.0); + assert!(valid_res.1.is_some()); + + let new_leaf: Digest = random(); + + // The verification tests below should only fail if `wrong_leaf_index` is + // different from `leaf_index`.
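+ // (Sketch of why the index matters: from the leaf index and the leaf count
+ // the verifier derives both the Merkle-path orientation and *which* peak the
+ // accumulated digest must equal, conceptually:
+ //
+ //     let (_mt_index, peak_index) =
+ //         leaf_index_to_mt_index_and_peak_index(leaf_index, leaf_count);
+ //     accumulated_digest == peaks[peak_index as usize]
+ //
+ // The helper name follows twenty-first's shared MMR utilities; the call is
+ // illustrative, not a quote of the verifier.)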
+ let wrong_leaf_index = (leaf_index + 1) % mmr.count_leaves().await; + membership_proof.leaf_index = wrong_leaf_index; + assert!( + wrong_leaf_index == leaf_index + || !membership_proof.verify(&peaks, new_leaf, size).0 + ); + membership_proof.leaf_index = leaf_index; + + // Modify an element in the MMR and run prove/verify for membership + let old_leaf = input_digests[leaf_index as usize]; + mmr.mutate_leaf(leaf_index, new_leaf).await; + + let new_peaks = mmr.get_peaks().await; + let new_mp = mmr.prove_membership_async(leaf_index).await; + assert!(new_mp.verify(&new_peaks, new_leaf, size).0); + assert!(!new_mp.verify(&new_peaks, old_leaf, size).0); + + // Return the element to its former value and run prove/verify for membership + mmr.mutate_leaf(leaf_index, old_leaf).await; + let old_peaks = mmr.get_peaks().await; + let old_mp = mmr.prove_membership_async(leaf_index).await; + assert!(!old_mp.verify(&old_peaks, new_leaf, size).0); + assert!(old_mp.verify(&old_peaks, old_leaf, size).0); + } + + // Make a new MMR where we append with a value and run the verify_append + let new_leaf_hash: Digest = random(); + mmr.append(new_leaf_hash).await; + assert!( + mmr_original + .verify_batch_update(&mmr.get_peaks().await, &[new_leaf_hash], &[]) + .await + ); + } + } + + #[tokio::test] + async fn leveldb_persist_storage_schema_test() { + type H = blake3::Hasher; + + let db = NeptuneLevelDb::open_new_test_database(true, None, None, None) + .await + .unwrap(); + let mut storage = SimpleRustyStorage::new(db); + let ammr0 = storage + .schema + .new_vec::("ammr-nodes-digests-0") + .await; + let mut ammr0: ArchivalMmr = ArchivalMmr::new(ammr0).await; + let ammr1 = storage + .schema + .new_vec::("ammr-nodes-digests-1") + .await; + let mut ammr1: ArchivalMmr = ArchivalMmr::new(ammr1).await; + + let digest0: Digest = random(); + ammr0.append(digest0).await; + + let digest1: Digest = random(); + ammr1.append(digest1).await; + assert_eq!(digest0, ammr0.get_leaf_async(0).await); + assert_eq!(digest1, ammr1.get_leaf_async(0).await); + storage.persist().await; + + assert_eq!(digest0, ammr0.get_leaf_async(0).await); + assert_eq!(digest1, ammr1.get_leaf_async(0).await); + } +} diff --git a/src/util_types/mutator_set/archival_mutator_set.rs b/src/util_types/mutator_set/archival_mutator_set.rs index 3029e875..dff76196 100644 --- a/src/util_types/mutator_set/archival_mutator_set.rs +++ b/src/util_types/mutator_set/archival_mutator_set.rs @@ -1,184 +1,200 @@ +use crate::database::storage::storage_vec::traits::*; use crate::models::blockchain::shared::Hash; use crate::prelude::twenty_first; +use crate::util_types::mutator_set::{get_swbf_indices, MutatorSetError}; use std::collections::{BTreeSet, HashMap}; use std::error::Error; + +use itertools::Itertools; use twenty_first::shared_math::tip5::Digest; use twenty_first::util_types::algebraic_hasher::AlgebraicHasher; use twenty_first::util_types::mmr; -use twenty_first::util_types::mmr::archival_mmr::ArchivalMmr; use twenty_first::util_types::mmr::mmr_accumulator::MmrAccumulator; -use twenty_first::util_types::mmr::mmr_trait::Mmr; -use twenty_first::util_types::storage_vec::traits::*; use super::active_window::ActiveWindow; use super::addition_record::AdditionRecord; +use super::archival_mmr::ArchivalMmr; use super::chunk::Chunk; use super::chunk_dictionary::ChunkDictionary; use super::ms_membership_proof::MsMembershipProof; use super::mutator_set_accumulator::MutatorSetAccumulator; -use super::mutator_set_kernel::{get_swbf_indices, MutatorSetKernel, MutatorSetKernelError}; 
-use super::mutator_set_trait::MutatorSet; use super::removal_record::RemovalRecord; -use super::shared::CHUNK_SIZE; +use super::shared::{BATCH_SIZE, CHUNK_SIZE}; pub struct ArchivalMutatorSet where - MmrStorage: StorageVec, - ChunkStorage: StorageVec, + MmrStorage: StorageVec + Send + Sync, + ChunkStorage: StorageVec + Send + Sync, { - pub kernel: MutatorSetKernel>, + pub aocl: ArchivalMmr, + pub swbf_inactive: ArchivalMmr, + pub swbf_active: ActiveWindow, pub chunks: ChunkStorage, } -impl MutatorSet for ArchivalMutatorSet +impl ArchivalMutatorSet where - MmrStorage: StorageVec, - ChunkStorage: StorageVec, + MmrStorage: StorageVec + Send + Sync, + ChunkStorage: StorageVec + StorageVecStream + Send + Sync, { - fn prove( - &mut self, + pub async fn prove( + &self, item: Digest, sender_randomness: Digest, receiver_preimage: Digest, ) -> MsMembershipProof { - self.kernel - .prove(item, sender_randomness, receiver_preimage) + MutatorSetAccumulator::new( + &self.aocl.get_peaks().await, + self.aocl.count_leaves().await, + &self.swbf_inactive.get_peaks().await, + &self.swbf_active.clone(), + ) + .prove(item, sender_randomness, receiver_preimage) } - fn verify(&self, item: Digest, membership_proof: &MsMembershipProof) -> bool { - self.kernel.verify(item, membership_proof) + pub async fn verify(&self, item: Digest, membership_proof: &MsMembershipProof) -> bool { + let accumulator = self.accumulator().await; + accumulator.verify(item, membership_proof) } - fn drop(&self, item: Digest, membership_proof: &MsMembershipProof) -> RemovalRecord { - self.kernel.drop(item, membership_proof) + pub async fn drop(&self, item: Digest, membership_proof: &MsMembershipProof) -> RemovalRecord { + let accumulator = self.accumulator().await; + accumulator.drop(item, membership_proof) } - fn add(&mut self, addition_record: &AdditionRecord) { - let new_chunk: Option<(u64, Chunk)> = self.kernel.add_helper(addition_record); + pub async fn add(&mut self, addition_record: &AdditionRecord) { + let new_chunk: Option<(u64, Chunk)> = self.add_helper(addition_record).await; match new_chunk { None => (), Some((chunk_index, chunk)) => { // Sanity check to verify that we agree on the index assert_eq!( chunk_index, - self.chunks.len(), + self.chunks.len().await, "Length/index must agree when inserting a chunk into an archival node" ); - self.chunks.push(chunk); + self.chunks.push(chunk).await; } } } - fn remove(&mut self, removal_record: &RemovalRecord) { - let new_chunks: HashMap = self.kernel.remove_helper(removal_record); - // note: set_many() is atomic. 
- self.chunks.set_many(new_chunks); + pub async fn remove(&mut self, removal_record: &RemovalRecord) { + let new_chunks: HashMap = self.remove_helper(removal_record).await; + self.chunks.set_many(new_chunks).await; } - fn hash(&self) -> Digest { - self.accumulator().hash() + pub async fn hash(&self) -> Digest { + self.accumulator().await.hash() } - /// Apply a list of removal records while keeping a list of mutator-set membership proofs - /// updated - fn batch_remove( + /// Apply a list of removal records while keeping a list of mutator set membership proofs + /// up-to-date + pub async fn batch_remove( &mut self, removal_records: Vec, preserved_membership_proofs: &mut [&mut MsMembershipProof], ) { - let chunk_index_to_chunk_mutation = self - .kernel - .batch_remove(removal_records, preserved_membership_proofs); + // update the active window and inactive MMR + let mut kernel = MutatorSetAccumulator { + aocl: self.aocl.to_accumulator_async().await, + swbf_inactive: self.swbf_inactive.to_accumulator_async().await, + swbf_active: self.swbf_active.clone(), + }; + let chunk_index_to_chunk_new_state = + kernel.batch_remove(removal_records, preserved_membership_proofs); + self.swbf_active = kernel.swbf_active; - // note: set_many() is atomic. - self.chunks.set_many(chunk_index_to_chunk_mutation) + // extract modified leafs with indices + let mut indices_and_new_leafs = chunk_index_to_chunk_new_state + .iter() + .map(|(index, chunk)| (*index, Hash::hash(chunk))) + .collect_vec(); + indices_and_new_leafs.sort_by_key(|(i, _d)| *i); + + // compile list of MMR membership proofs to be preserved + let mut preserved_mmr_membership_proofs = preserved_membership_proofs + .iter_mut() + .flat_map(|msmp| { + msmp.target_chunks + .dictionary + .iter_mut() + .map(|(_index, (mmrmp, _chunk))| mmrmp) + }) + .collect_vec(); + + // Apply the batch-update to the inactive part of the sliding window Bloom filter. 
+ // This updates both the inactive part of the SWBF and the MMR membership proofs + self.swbf_inactive + .batch_mutate_leaf_and_update_mps( + &mut preserved_mmr_membership_proofs, + indices_and_new_leafs, + ) + .await; + + self.chunks.set_many(chunk_index_to_chunk_new_state).await } } -/// Methods that only work when implementing using archival MMRs as the underlying two MMRs impl ArchivalMutatorSet where - MmrStorage: StorageVec, - ChunkStorage: StorageVec, + MmrStorage: StorageVec + Send + Sync, + ChunkStorage: StorageVec + StorageVecStream + Send + Sync, { - pub fn new_empty(aocl: MmrStorage, swbf_inactive: MmrStorage, chunks: ChunkStorage) -> Self { - assert_eq!(0, aocl.len()); - assert_eq!(0, swbf_inactive.len()); - assert_eq!(0, chunks.len()); - let aocl: ArchivalMmr = ArchivalMmr::new(aocl); - let swbf_inactive: ArchivalMmr = ArchivalMmr::new(swbf_inactive); - Self { - kernel: MutatorSetKernel { - aocl, - swbf_inactive, - swbf_active: ActiveWindow::new(), - }, - chunks, - } - } - - pub fn new_or_restore( + pub async fn new_empty( aocl: MmrStorage, swbf_inactive: MmrStorage, chunks: ChunkStorage, - active_window: ActiveWindow, ) -> Self { - let aocl: ArchivalMmr = ArchivalMmr::new(aocl); - let swbf_inactive: ArchivalMmr = ArchivalMmr::new(swbf_inactive); - + assert_eq!(0, aocl.len().await); + assert_eq!(0, swbf_inactive.len().await); + assert_eq!(0, chunks.len().await); + let aocl: ArchivalMmr = ArchivalMmr::new(aocl).await; + let swbf_inactive: ArchivalMmr = ArchivalMmr::new(swbf_inactive).await; Self { - kernel: MutatorSetKernel { - aocl, - swbf_inactive, - swbf_active: active_window, - }, + aocl, + swbf_inactive, + swbf_active: ActiveWindow::new(), chunks, } } /// Returns an authentication path for an element in the append-only commitment list - pub fn get_aocl_authentication_path( + pub async fn get_aocl_authentication_path( &self, index: u64, ) -> Result, Box> { - if self.kernel.aocl.count_leaves() <= index { - return Err(Box::new( - MutatorSetKernelError::RequestedAoclAuthPathOutOfBounds(( - index, - self.kernel.aocl.count_leaves(), - )), - )); + if self.aocl.count_leaves().await <= index { + return Err(Box::new(MutatorSetError::RequestedAoclAuthPathOutOfBounds( + (index, self.aocl.count_leaves().await), + ))); } - Ok(self.kernel.aocl.prove_membership(index).0) + Ok(self.aocl.prove_membership_async(index).await) } /// Returns an authentication path for a chunk in the sliding window Bloom filter - pub fn get_chunk_and_auth_path( + pub async fn get_chunk_and_auth_path( &self, chunk_index: u64, ) -> Result<(mmr::mmr_membership_proof::MmrMembershipProof, Chunk), Box> { - if self.kernel.swbf_inactive.count_leaves() <= chunk_index { - return Err(Box::new( - MutatorSetKernelError::RequestedSwbfAuthPathOutOfBounds(( - chunk_index, - self.kernel.swbf_inactive.count_leaves(), - )), - )); + if self.swbf_inactive.count_leaves().await <= chunk_index { + return Err(Box::new(MutatorSetError::RequestedSwbfAuthPathOutOfBounds( + (chunk_index, self.swbf_inactive.count_leaves().await), + ))); } let chunk_auth_path: mmr::mmr_membership_proof::MmrMembershipProof = - self.kernel.swbf_inactive.prove_membership(chunk_index).0; + self.swbf_inactive.prove_membership_async(chunk_index).await; // This check should never fail. It would mean that chunks are missing but that the // archival MMR has the membership proof for the chunk. That would be a programming // error. 
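+ // (Informally, the invariant backing this: every window slide appends
+ // exactly one chunk to `self.chunks` (see `add` above) and one chunk digest
+ // to `self.swbf_inactive` (see `add_helper` below), so
+ //
+ //     self.chunks.len().await == self.swbf_inactive.count_leaves().await
+ //
+ // holds whenever no mutation is in flight.)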
assert!( - self.chunks.len() > chunk_index, + self.chunks.len().await > chunk_index, "A chunk must be known if its authentication path is known." ); - let chunk = self.chunks.get(chunk_index); + let chunk = self.chunks.get(chunk_index).await; Ok((chunk_auth_path, chunk)) } @@ -186,22 +202,22 @@ where /// Restore the membership proof. If called on someone else's UTXO, this leaks privacy. In this case, /// the caller is better off using `get_aocl_authentication_path` and `get_chunk_and_auth_path` for the /// relevant indices. - pub fn restore_membership_proof( + pub async fn restore_membership_proof( &self, item: Digest, sender_randomness: Digest, receiver_preimage: Digest, aocl_index: u64, ) -> Result<MsMembershipProof, Box<dyn Error>> { - if self.kernel.aocl.is_empty() { - return Err(Box::new(MutatorSetKernelError::MutatorSetIsEmpty)); + if self.aocl.is_empty().await { + return Err(Box::new(MutatorSetError::MutatorSetIsEmpty)); } - let auth_path_aocl = self.get_aocl_authentication_path(aocl_index)?; + let auth_path_aocl = self.get_aocl_authentication_path(aocl_index).await?; let swbf_indices = get_swbf_indices(item, sender_randomness, receiver_preimage, aocl_index); - let batch_index = self.kernel.get_batch_index(); - let window_start = batch_index as u128 * CHUNK_SIZE as u128; + let batch_index = self.get_batch_index_async().await; + let window_start = batch_index * CHUNK_SIZE as u128; let chunk_indices: BTreeSet<u64> = swbf_indices .iter() @@ -209,13 +225,17 @@ where .map(|bi| (*bi / CHUNK_SIZE as u128) as u64) .collect(); let mut target_chunks: ChunkDictionary = ChunkDictionary::default(); - for (chunk_index, chunk) in self.chunks.many_iter(chunk_indices) { + + let stream = self.chunks.stream_many(chunk_indices).await; + pin_mut!(stream); // needed for iteration + + while let Some((chunk_index, chunk)) = stream.next().await { assert!( - self.chunks.len() > chunk_index, + self.chunks.len().await > chunk_index, "A chunk must be known if its authentication path is known." ); let chunk_membership_proof: mmr::mmr_membership_proof::MmrMembershipProof<Hash> = - self.kernel.swbf_inactive.prove_membership(chunk_index).0; + self.swbf_inactive.prove_membership_async(chunk_index).await; target_chunks .dictionary .insert(chunk_index, (chunk_membership_proof, chunk.to_owned())); @@ -232,10 +252,10 @@ where /// Revert the `RemovalRecord` by removing the indices that /// were inserted by it. These live in either the active window, or /// in a relevant chunk. - pub fn revert_remove(&mut self, removal_record: &RemovalRecord) { + pub async fn revert_remove(&mut self, removal_record: &RemovalRecord) { let removal_record_indices: Vec<u128> = removal_record.absolute_indices.to_vec(); - let batch_index = self.kernel.get_batch_index(); - let active_window_start = batch_index as u128 * CHUNK_SIZE as u128; + let batch_index = self.get_batch_index_async().await; + let active_window_start = batch_index * CHUNK_SIZE as u128; let mut chunkidx_to_difference_dict: HashMap<u64, Chunk> = HashMap::new(); // Populate the dictionary by iterating over all the removal @@ -245,7 +265,7 @@ where for rr_index in removal_record_indices { if rr_index >= active_window_start { let relative_index = (rr_index - active_window_start) as u32; - self.kernel.swbf_active.remove(relative_index); + self.swbf_active.remove(relative_index); } else { let chunkidx = (rr_index / CHUNK_SIZE as u128) as u64; let relative_index = (rr_index % CHUNK_SIZE as u128) as u32; @@ -258,24 +278,24 @@ where for (chunk_index, revert_chunk) in chunkidx_to_difference_dict { // For each chunk, subtract the difference from the chunk.
- let previous_chunk = self.chunks.get(chunk_index); + let previous_chunk = self.chunks.get(chunk_index).await; let mut new_chunk = previous_chunk; new_chunk.subtract(revert_chunk.clone()); // update archival mmr - self.kernel - .swbf_inactive - .mutate_leaf_raw(chunk_index, Hash::hash(&new_chunk)); + self.swbf_inactive + .mutate_leaf(chunk_index, Hash::hash(&new_chunk)) + .await; - self.chunks.set(chunk_index, new_chunk); + self.chunks.set(chunk_index, new_chunk).await; } } /// Determine whether the given `AdditionRecord` can be reversed. /// Equivalently, determine if it was added last. - pub fn add_is_reversible(&mut self, addition_record: &AdditionRecord) -> bool { - let leaf_index = self.kernel.aocl.count_leaves() - 1; - let digest = self.kernel.aocl.get_leaf(leaf_index); + pub async fn add_is_reversible(&mut self, addition_record: &AdditionRecord) -> bool { + let leaf_index = self.aocl.count_leaves().await - 1; + let digest = self.aocl.get_leaf_async(leaf_index).await; addition_record.canonical_commitment == digest } @@ -285,63 +305,174 @@ where /// - If at a boundary where the active window slides, remove a chunk /// from the inactive window, and slide window back by putting the /// last inactive chunk in the active window. - pub fn revert_add(&mut self, addition_record: &AdditionRecord) { - let removed_add_index = self.kernel.aocl.count_leaves() - 1; + pub async fn revert_add(&mut self, addition_record: &AdditionRecord) { + let removed_add_index = self.aocl.count_leaves().await - 1; // 1. Remove last leaf from AOCL - let digest = self.kernel.aocl.remove_last_leaf().unwrap(); + let digest = self.aocl.remove_last_leaf_async().await.unwrap(); assert_eq!(addition_record.canonical_commitment, digest); // 2. Possibly shrink bloom filter by moving a chunk back into active window // // This happens when the batch index changes (i.e. every `BATCH_SIZE` addition). - if !MutatorSetKernel::>::window_slides_back(removed_add_index) - { + if !MutatorSetAccumulator::window_slides_back(removed_add_index) { return; } // 2.a. Remove a chunk from inactive window - let _digest = self.kernel.swbf_inactive.remove_last_leaf(); - let last_inactive_chunk = self.chunks.pop().unwrap(); + let _digest = self.swbf_inactive.remove_last_leaf_async().await; + let last_inactive_chunk = self.chunks.pop().await.unwrap(); // 2.b. Slide active window back by putting `last_inactive_chunk` back - self.kernel - .swbf_active - .slide_window_back(&last_inactive_chunk); + self.swbf_active.slide_window_back(&last_inactive_chunk); } /// Determine whether the index `index` is set in the Bloom /// filter, whether in the active window, or in some chunk. 
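+ /// The partition is plain index arithmetic: with
+ /// `active_window_start = batch_index * CHUNK_SIZE`, an `index` at or above
+ /// that start is tested in the active window at `index - active_window_start`,
+ /// while anything below it lives in chunk `index / CHUNK_SIZE` at relative
+ /// index `index % CHUNK_SIZE`.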
- pub fn bloom_filter_contains(&mut self, index: u128) -> bool { - let batch_index = self.kernel.get_batch_index(); - let active_window_start = batch_index as u128 * CHUNK_SIZE as u128; + pub async fn bloom_filter_contains(&mut self, index: u128) -> bool { + let batch_index = self.get_batch_index_async().await; + let active_window_start = batch_index * CHUNK_SIZE as u128; if index >= active_window_start { let relative_index = (index - active_window_start) as u32; - self.kernel.swbf_active.contains(relative_index) + self.swbf_active.contains(relative_index) } else { let chunk_index = (index / CHUNK_SIZE as u128) as u64; let relative_index = (index % CHUNK_SIZE as u128) as u32; - let relevant_chunk = self.chunks.get(chunk_index); + let relevant_chunk = self.chunks.get(chunk_index).await; relevant_chunk.contains(relative_index) } } - pub fn accumulator(&self) -> MutatorSetAccumulator { - let set_commitment = MutatorSetKernel::> { - aocl: MmrAccumulator::init( - self.kernel.aocl.get_peaks(), - self.kernel.aocl.count_leaves(), - ), + pub async fn accumulator(&self) -> MutatorSetAccumulator { + MutatorSetAccumulator { + aocl: MmrAccumulator::init(self.aocl.get_peaks().await, self.aocl.count_leaves().await), swbf_inactive: MmrAccumulator::init( - self.kernel.swbf_inactive.get_peaks(), - self.kernel.swbf_inactive.count_leaves(), + self.swbf_inactive.get_peaks().await, + self.swbf_inactive.count_leaves().await, ), - swbf_active: self.kernel.swbf_active.clone(), - }; - MutatorSetAccumulator { - kernel: set_commitment, + swbf_active: self.swbf_active.clone(), + } + } + + pub async fn get_batch_index_async(&self) -> u128 { + match self.aocl.count_leaves().await { + 0 => 0, + n => (n - 1) as u128 / BATCH_SIZE as u128, + } + } + + /// Helper function. Like `add`, but also returns the chunk that + /// was added to the inactive SWBF if the window slid (and None + /// otherwise), since this is needed by the archival version of + /// the mutator set. + pub async fn add_helper(&mut self, addition_record: &AdditionRecord) -> Option<(u64, Chunk)> { + // Notice that `add` cannot return a membership proof, since `add` cannot know the + // randomness that was used to create the commitment. This randomness can only be known + // by the sender and/or receiver of the UTXO. And `add` must be run by all nodes keeping + // track of the mutator set. + + // add to list + let item_index = self.aocl.count_leaves().await; + self.aocl + .append(addition_record.canonical_commitment.to_owned()) + .await; // ignore auth path + + if !Self::window_slides(item_index) { + return None; + } + + // if the window slides, update the filter + // First update the inactive part of the SWBF, the SWBF MMR + let new_chunk: Chunk = self.swbf_active.slid_chunk(); + let chunk_digest: Digest = Hash::hash(&new_chunk); + let new_chunk_index = self.swbf_inactive.count_leaves().await; + self.swbf_inactive.append(chunk_digest).await; // ignore auth path + + // Then move the window to the right, equivalent to moving the values + // inside the window to the left. + self.swbf_active.slide_window(); + + // Return the chunk that was added to the inactive part of the SWBF. + // This chunk is needed by the archival mutator set. The regular + // mutator set can ignore it. + Some((new_chunk_index, new_chunk)) + } + + /// Determine if the window slides before absorbing an item, + /// given the index of the to-be-added item.
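+ /// (Reverting an addition uses the mirror-image check `window_slides_back`
+ /// in `revert_add` above; for the two operations to compose, both checks
+ /// presumably hold for exactly the same indices: `BATCH_SIZE`,
+ /// `2 * BATCH_SIZE`, and so on.)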
+ pub fn window_slides(added_index: u64) -> bool { + added_index != 0 && added_index % BATCH_SIZE as u64 == 0 + + // example cases: + // - index == 0 we don't care about + // - index == 1 does not generate a slide + // - index == n * BATCH_SIZE generates a slide for any n + } + + /// Remove a record and return the chunks that have been updated in this process, + /// after applying the update. Does not mutate the removal record. + pub async fn remove_helper(&mut self, removal_record: &RemovalRecord) -> HashMap { + let batch_index = self.get_batch_index_async().await; + let active_window_start = batch_index * CHUNK_SIZE as u128; + + // insert all indices + let mut new_target_chunks: ChunkDictionary = removal_record.target_chunks.clone(); + let chunkindices_to_indices_dict: HashMap> = + removal_record.get_chunkidx_to_indices_dict(); + + for (chunk_index, indices) in chunkindices_to_indices_dict { + if chunk_index >= batch_index as u64 { + // index is in the active part, so insert it in the active part of the Bloom filter + for index in indices { + let relative_index = (index - active_window_start) as u32; + self.swbf_active.insert(relative_index); + } + + continue; + } + + // If chunk index is not in the active part, insert the index into the relevant chunk + let new_target_chunks_clone = new_target_chunks.clone(); + let count_leaves = self.aocl.count_leaves().await; + let relevant_chunk = new_target_chunks + .dictionary + .get_mut(&chunk_index) + .unwrap_or_else(|| { + panic!( + "Can't get chunk index {chunk_index} from removal record dictionary! dictionary: {:?}\nAOCL size: {}\nbatch index: {}\nRemoval record: {:?}", + new_target_chunks_clone.dictionary, + count_leaves, + batch_index, + removal_record + ) + }); + for index in indices { + let relative_index = (index % CHUNK_SIZE as u128) as u32; + relevant_chunk.1.insert(relative_index); + } } + + // update mmr + // to do this, we need to keep track of all membership proofs + let target_chunk_indices = new_target_chunks.dictionary.keys().cloned().collect_vec(); + let all_leafs = new_target_chunks + .dictionary + .values() + .map(|(_p, chunk)| Hash::hash(chunk)); + let mutation_data = target_chunk_indices.into_iter().zip(all_leafs).collect(); + + // If we want to update the membership proof with this removal, we + // could use the below function. 
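+ // (The `&mut []` below means no caller-side MMR membership proofs are kept
+ // in sync by this call. A variant that preserved proofs would pass them in,
+ // exactly as `batch_remove` above does:
+ //
+ //     self.swbf_inactive
+ //         .batch_mutate_leaf_and_update_mps(
+ //             &mut preserved_mmr_membership_proofs,
+ //             mutation_data,
+ //         )
+ //         .await;
+ //
+ // with the proofs collected from the callers' `MsMembershipProof`s.)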
+ self.swbf_inactive + .batch_mutate_leaf_and_update_mps(&mut [], mutation_data) + .await; + + new_target_chunks + .dictionary + .into_iter() + .map(|(chunk_index, (_mp, chunk))| (chunk_index, chunk)) + .collect() } } @@ -351,7 +482,7 @@ mod archival_mutator_set_tests { use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; - use crate::util_types::mutator_set::mutator_set_trait::commit; + use crate::util_types::mutator_set::commit; use crate::util_types::mutator_set::removal_record::AbsoluteIndexSet; use crate::util_types::mutator_set::shared::{BATCH_SIZE, NUM_TRIALS}; use crate::util_types::test_shared::mutator_set::{ @@ -360,9 +491,9 @@ mod archival_mutator_set_tests { use super::*; - #[test] - fn archival_set_commitment_test() { - let mut rms = empty_rusty_mutator_set(); + #[tokio::test] + async fn archival_set_commitment_test() { + let mut rms = empty_rusty_mutator_set().await; let archival_mutator_set = rms.ams_mut(); let num_additions = 65; @@ -373,28 +504,27 @@ mod archival_mutator_set_tests { let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); let addition_record = commit(item, sender_randomness, receiver_preimage.hash::()); - let membership_proof = - archival_mutator_set.prove(item, sender_randomness, receiver_preimage); + let membership_proof = archival_mutator_set + .prove(item, sender_randomness, receiver_preimage) + .await; let res = MsMembershipProof::batch_update_from_addition( &mut membership_proofs.iter_mut().collect::>(), &items, - &archival_mutator_set.kernel, + &archival_mutator_set.accumulator().await, &addition_record, ); assert!(res.is_ok()); - archival_mutator_set.add(&addition_record); - assert!(archival_mutator_set.verify(item, &membership_proof)); + archival_mutator_set.add(&addition_record).await; + assert!(archival_mutator_set.verify(item, &membership_proof).await); // Verify that we can just read out the same membership proofs from the // archival MMR as those we get through the membership proof book keeping. - let archival_membership_proof = match archival_mutator_set.restore_membership_proof( - item, - sender_randomness, - receiver_preimage, - i, - ) { + let archival_membership_proof = match archival_mutator_set + .restore_membership_proof(item, sender_randomness, receiver_preimage, i) + .await + { Err(err) => panic!( "Failed to get membership proof from archival mutator set: {}", err @@ -417,9 +547,9 @@ mod archival_mutator_set_tests { } } - #[test] - fn archival_mutator_set_revert_add_test() { - let mut rms = empty_rusty_mutator_set(); + #[tokio::test] + async fn archival_mutator_set_revert_add_test() { + let mut rms = empty_rusty_mutator_set().await; let archival_mutator_set = rms.ams_mut(); // Repeatedly insert `AdditionRecord` into empty MutatorSet and revert it @@ -428,15 +558,15 @@ mod archival_mutator_set_tests { // to being empty on every iteration. 
for _ in 0..2 * BATCH_SIZE { let (item, addition_record, membership_proof) = - prepare_random_addition(archival_mutator_set); + prepare_random_addition(archival_mutator_set).await; - let commitment_before_add = archival_mutator_set.hash(); - archival_mutator_set.add(&addition_record); - assert!(archival_mutator_set.verify(item, &membership_proof)); + let commitment_before_add = archival_mutator_set.hash().await; + archival_mutator_set.add(&addition_record).await; + assert!(archival_mutator_set.verify(item, &membership_proof).await); - archival_mutator_set.revert_add(&addition_record); - let commitment_after_revert = archival_mutator_set.hash(); - assert!(!archival_mutator_set.verify(item, &membership_proof)); + archival_mutator_set.revert_add(&addition_record).await; + let commitment_after_revert = archival_mutator_set.hash().await; + assert!(!archival_mutator_set.verify(item, &membership_proof).await); assert_eq!(commitment_before_add, commitment_after_revert); } @@ -446,12 +576,12 @@ mod archival_mutator_set_tests { // Insert a number of `AdditionRecord`s into MutatorSet and assert their membership. for _ in 0..n_iterations { - let record = prepare_random_addition(archival_mutator_set); + let record = prepare_random_addition(archival_mutator_set).await; let (item, addition_record, membership_proof) = record.clone(); records.push(record); - commitments_before.push(archival_mutator_set.hash()); - archival_mutator_set.add(&addition_record); - assert!(archival_mutator_set.verify(item, &membership_proof)); + commitments_before.push(archival_mutator_set.hash().await); + archival_mutator_set.add(&addition_record).await; + assert!(archival_mutator_set.verify(item, &membership_proof).await); } assert_eq!(n_iterations, records.len()); @@ -460,9 +590,9 @@ mod archival_mutator_set_tests { // // This reaches the sliding window every `BATCH_SIZE` iteration. 
for (item, addition_record, membership_proof) in records.into_iter().rev() { - archival_mutator_set.revert_add(&addition_record); - let commitment_after_revert = archival_mutator_set.hash(); - assert!(!archival_mutator_set.verify(item, &membership_proof)); + archival_mutator_set.revert_add(&addition_record).await; + let commitment_after_revert = archival_mutator_set.hash().await; + assert!(!archival_mutator_set.verify(item, &membership_proof).await); let commitment_before_add = commitments_before.pop().unwrap(); assert_eq!( @@ -473,8 +603,8 @@ } } - #[test] - fn bloom_filter_is_reversible() { + #[tokio::test] + async fn bloom_filter_is_reversible() { // With the `3086841408u32` seed a collision is generated at i = 1 and i = 38, on index 510714 let seed_integer = 3086841408u32; let seed = seed_integer.to_be_bytes(); @@ -485,7 +615,7 @@ let mut seeded_rng = StdRng::from_seed(seed_as_bytes); - let mut rms = empty_rusty_mutator_set(); + let mut rms = empty_rusty_mutator_set().await; let archival_mutator_set = rms.ams_mut(); // Also keep track of a mutator set accumulator to verify that this uses an invertible Bloom filter @@ -498,13 +628,13 @@ let added_items = 50; for current_item in 0..added_items { let (item, addition_record, membership_proof) = - prepare_seeded_prng_addition(archival_mutator_set, &mut seeded_rng); + prepare_seeded_prng_addition(archival_mutator_set, &mut seeded_rng).await; // Update all MPs MsMembershipProof::batch_update_from_addition( &mut mps.iter_mut().collect_vec(), &items.iter().cloned().collect_vec(), - &archival_mutator_set.accumulator().kernel, + &archival_mutator_set.accumulator().await, &addition_record, ) .unwrap(); @@ -512,7 +642,7 @@ items.push(item); mps.push(membership_proof.clone()); - archival_mutator_set.add(&addition_record); + archival_mutator_set.add(&addition_record).await; msa.add(&addition_record); let indices = membership_proof.compute_indices(item).to_vec(); @@ -534,10 +664,8 @@ println!("collision: {saw_collission_at:?}"); // Verify that the MPs with colliding indices are still valid - for ms in [ - &msa as &dyn MutatorSet, - archival_mutator_set as &dyn MutatorSet, - ] { + { + let ms = &msa; assert!( ms.verify( items[saw_collission_at.0 .0], &mps[saw_collission_at.0 .0].clone() ), "First colliding MS MP must be valid" ); assert!( ms.verify( items[saw_collission_at.0 .1], &mps[saw_collission_at.0 .1].clone() ), "Second colliding MS MP must be valid" ); } + { + let ms = &archival_mutator_set; + assert!( + ms.verify( + items[saw_collission_at.0 .0], + &mps[saw_collission_at.0 .0].clone() + ) + .await, + "First colliding MS MP must be valid" + ); + assert!( + ms.verify( + items[saw_collission_at.0 .1], + &mps[saw_collission_at.0 .1].clone() + ) + .await, + "Second colliding MS MP must be valid" + ); + } // Remove 1st colliding element assert!( - !archival_mutator_set.bloom_filter_contains(saw_collission_at.1), + !archival_mutator_set + .bloom_filter_contains(saw_collission_at.1) + .await, "Bloom filter must be empty when no removal records have been applied" ); - let digest_before_removal = archival_mutator_set.hash(); - let rem0 = - archival_mutator_set.drop(items[saw_collission_at.0 .0], &mps[saw_collission_at.0 .0]); - archival_mutator_set.remove(&rem0); + let digest_before_removal = archival_mutator_set.hash().await; + let rem0 = archival_mutator_set + .drop(items[saw_collission_at.0 .0], &mps[saw_collission_at.0 .0]) + .await; + archival_mutator_set.remove(&rem0).await; msa.remove(&rem0);
assert!( - archival_mutator_set.bloom_filter_contains(saw_collission_at.1), + archival_mutator_set + .bloom_filter_contains(saw_collission_at.1) + .await, "Bloom filter must have collision bit set after 1st removal" ); // Update all MPs MsMembershipProof::batch_update_from_remove(&mut mps.iter_mut().collect_vec(), &rem0) .unwrap(); - for ms in [ - &msa as &dyn MutatorSet, - archival_mutator_set as &dyn MutatorSet, - ] { + { + let ms = &msa; assert!( !ms.verify( items[saw_collission_at.0 .0], @@ -584,24 +734,36 @@ "First colliding MS MP must be invalid after removal" ); } + { + let ms = &archival_mutator_set; + assert!( + !ms.verify( + items[saw_collission_at.0 .0], + &mps[saw_collission_at.0 .0].clone() + ) + .await, + "First colliding MS MP must be invalid after removal" + ); + } // Remove 2nd colliding element - let rem1 = - archival_mutator_set.drop(items[saw_collission_at.0 .1], &mps[saw_collission_at.0 .1]); - archival_mutator_set.remove(&rem1); + let rem1 = archival_mutator_set + .drop(items[saw_collission_at.0 .1], &mps[saw_collission_at.0 .1]) + .await; + archival_mutator_set.remove(&rem1).await; msa.remove(&rem1); assert!( - archival_mutator_set.bloom_filter_contains(saw_collission_at.1), + archival_mutator_set + .bloom_filter_contains(saw_collission_at.1) + .await, "Bloom filter must have collision bit set after 2nd removal" ); // Update all MPs MsMembershipProof::batch_update_from_remove(&mut mps.iter_mut().collect_vec(), &rem1) .unwrap(); - for ms in [ - &msa as &dyn MutatorSet, - archival_mutator_set as &dyn MutatorSet, - ] { + { + let ms = &msa; assert!( !ms.verify( items[saw_collission_at.0 .1], @@ -610,15 +772,28 @@ "Second colliding MS MP must be invalid after removal" ); } + { + let ms = &archival_mutator_set; + assert!( + !ms.verify( + items[saw_collission_at.0 .1], + &mps[saw_collission_at.0 .1].clone() + ) + .await, + "Second colliding MS MP must be invalid after removal" + ); + } // Verify that AMS and MSA agree now that we know we have an index in the Bloom filter // that was set twice - assert_eq!(archival_mutator_set.hash(), msa.hash(), "Archival MS and MS accumulator must agree also with collisions in the Bloom filter indices"); + assert_eq!(archival_mutator_set.hash().await, msa.hash(), "Archival MS and MS accumulator must agree also with collisions in the Bloom filter indices"); // Reverse 1st removal - archival_mutator_set.revert_remove(&rem0); + archival_mutator_set.revert_remove(&rem0).await; assert!( - archival_mutator_set.bloom_filter_contains(saw_collission_at.1), + archival_mutator_set + .bloom_filter_contains(saw_collission_at.1) + .await, "Bloom filter must have collision bit set after 1st removal revert" ); @@ -626,15 +801,17 @@ for (i, (mp, &itm)) in mps.iter_mut().zip_eq(items.iter()).enumerate() { mp.revert_update_from_remove(&rem0).unwrap(); assert!( - i == saw_collission_at.0 .1 || archival_mutator_set.verify(itm, mp), + i == saw_collission_at.0 .1 || archival_mutator_set.verify(itm, mp).await, "MS MP must be valid after reversing a removal update" ); } // Reverse 2nd removal - archival_mutator_set.revert_remove(&rem1); + archival_mutator_set.revert_remove(&rem1).await; assert!( - !archival_mutator_set.bloom_filter_contains(saw_collission_at.1), + !archival_mutator_set + .bloom_filter_contains(saw_collission_at.1) + .await, "Bloom filter must not have collision bit set after 2nd removal revert" ); @@ -642,12 +819,12 @@ mod
archival_mutator_set_tests { for (mp, &itm) in mps.iter_mut().zip_eq(items.iter()) { mp.revert_update_from_remove(&rem1).unwrap(); assert!( - archival_mutator_set.verify(itm, mp), + archival_mutator_set.verify(itm, mp).await, "MS MP must be valid after reversing a removal update" ); } - assert_eq!(digest_before_removal, archival_mutator_set.hash(), "Digest of archival MS must agree before removals and after reversion of those removals"); + assert_eq!(digest_before_removal, archival_mutator_set.hash().await, "Digest of archival MS must agree before removals and after reversion of those removals"); assert_eq!( added_items, mps.len(), @@ -656,31 +833,31 @@ mod archival_mutator_set_tests { } #[should_panic(expected = "Decremented integer is already zero.")] - #[test] - fn revert_remove_from_active_bloom_filter_panic() { - let mut rms = empty_rusty_mutator_set(); + #[tokio::test] + async fn revert_remove_from_active_bloom_filter_panic() { + let mut rms = empty_rusty_mutator_set().await; let archival_mutator_set = rms.ams_mut(); - let record = prepare_random_addition(archival_mutator_set); + let record = prepare_random_addition(archival_mutator_set).await; let (item, addition_record, membership_proof) = record; - archival_mutator_set.add(&addition_record); + archival_mutator_set.add(&addition_record).await; - let removal_record = archival_mutator_set.drop(item, &membership_proof); + let removal_record = archival_mutator_set.drop(item, &membership_proof).await; // This next line should panic, as we're attempting to remove an index that is not present // in the active window - archival_mutator_set.revert_remove(&removal_record); + archival_mutator_set.revert_remove(&removal_record).await; } #[should_panic(expected = "Attempted to remove index that was not present in chunk.")] - #[test] - fn revert_remove_invalid_panic() { - let mut rms = empty_rusty_mutator_set(); + #[tokio::test] + async fn revert_remove_invalid_panic() { + let mut rms = empty_rusty_mutator_set().await; let archival_mutator_set = rms.ams_mut(); for _ in 0..2 * BATCH_SIZE { let (_item, addition_record, _membership_proof) = - prepare_random_addition(archival_mutator_set); - archival_mutator_set.add(&addition_record); + prepare_random_addition(archival_mutator_set).await; + archival_mutator_set.add(&addition_record).await; } let mut fake_indices = [2u128; NUM_TRIALS as usize]; @@ -692,23 +869,25 @@ mod archival_mutator_set_tests { // This next line should panic, as we're attempting to remove an index that is not present // in the inactive part of the Bloom filter - archival_mutator_set.revert_remove(&fake_removal_record); + archival_mutator_set + .revert_remove(&fake_removal_record) + .await; } - #[test] - fn archival_mutator_set_revert_remove_test() { - let mut rms = empty_rusty_mutator_set(); + #[tokio::test] + async fn archival_mutator_set_revert_remove_test() { + let mut rms = empty_rusty_mutator_set().await; let archival_mutator_set = rms.ams_mut(); let n_iterations = 11 * BATCH_SIZE as usize; let mut records = Vec::with_capacity(n_iterations); // Insert a number of `AdditionRecord`s into MutatorSet and assert their membership. 
         for _ in 0..n_iterations {
-            let record = prepare_random_addition(archival_mutator_set);
+            let record = prepare_random_addition(archival_mutator_set).await;
             let (item, addition_record, membership_proof) = record.clone();
             records.push(record);
-            archival_mutator_set.add(&addition_record);
-            assert!(archival_mutator_set.verify(item, &membership_proof));
+            archival_mutator_set.add(&addition_record).await;
+            assert!(archival_mutator_set.verify(item, &membership_proof).await);
         }

         for (idx, (item, _addition_record, expired_membership_proof)) in
@@ -722,24 +901,39 @@ mod archival_mutator_set_tests {
                 expired_membership_proof.receiver_preimage,
                 expired_membership_proof.auth_path_aocl.leaf_index,
             )
+            .await
             .unwrap();
-            assert!(archival_mutator_set.verify(item, &restored_membership_proof));
+            assert!(
+                archival_mutator_set
+                    .verify(item, &restored_membership_proof)
+                    .await
+            );

-            let removal_record = archival_mutator_set.drop(item, &restored_membership_proof);
-            let commitment_before_remove = archival_mutator_set.hash();
-            archival_mutator_set.remove(&removal_record);
-            assert!(!archival_mutator_set.verify(item, &restored_membership_proof));
+            let removal_record = archival_mutator_set
+                .drop(item, &restored_membership_proof)
+                .await;
+            let commitment_before_remove = archival_mutator_set.hash().await;
+            archival_mutator_set.remove(&removal_record).await;
+            assert!(
+                !archival_mutator_set
+                    .verify(item, &restored_membership_proof)
+                    .await
+            );

-            archival_mutator_set.revert_remove(&removal_record);
-            let commitment_after_revert = archival_mutator_set.hash();
+            archival_mutator_set.revert_remove(&removal_record).await;
+            let commitment_after_revert = archival_mutator_set.hash().await;
             assert_eq!(commitment_before_remove, commitment_after_revert);
-            assert!(archival_mutator_set.verify(item, &restored_membership_proof));
+            assert!(
+                archival_mutator_set
+                    .verify(item, &restored_membership_proof)
+                    .await
+            );
         }
     }

-    #[test]
-    fn archival_set_batch_remove_simple_test() {
-        let mut rms = empty_rusty_mutator_set();
+    #[tokio::test]
+    async fn archival_set_batch_remove_simple_test() {
+        let mut rms = empty_rusty_mutator_set().await;
         let archival_mutator_set = rms.ams_mut();

         let num_additions = 130;
@@ -751,18 +945,19 @@ mod archival_mutator_set_tests {
             let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
             let addition_record = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());

-            let membership_proof =
-                archival_mutator_set.prove(item, sender_randomness, receiver_preimage);
+            let membership_proof = archival_mutator_set
+                .prove(item, sender_randomness, receiver_preimage)
+                .await;

             MsMembershipProof::batch_update_from_addition(
                 &mut membership_proofs.iter_mut().collect::<Vec<_>>(),
                 &items,
-                &archival_mutator_set.kernel,
+                &archival_mutator_set.accumulator().await,
                 &addition_record,
             )
             .expect("MS membership update must work");

-            archival_mutator_set.add(&addition_record);
+            archival_mutator_set.add(&addition_record).await;
             membership_proofs.push(membership_proof);
             items.push(item);
@@ -770,21 +965,23 @@

         let mut removal_records: Vec<RemovalRecord> = vec![];
         for (mp, &item) in membership_proofs.iter().zip_eq(items.iter()) {
-            removal_records.push(archival_mutator_set.drop(item, mp));
+            removal_records.push(archival_mutator_set.drop(item, mp).await);
         }

         for (mp, &item) in membership_proofs.iter().zip_eq(items.iter()) {
-            assert!(archival_mutator_set.verify(item, mp));
+            assert!(archival_mutator_set.verify(item, mp).await);
         }

-        archival_mutator_set.batch_remove(removal_records, &mut []);
+        archival_mutator_set
+            .batch_remove(removal_records, &mut [])
+            .await;

         for (mp, &item) in membership_proofs.iter().zip_eq(items.iter()) {
-            assert!(!archival_mutator_set.verify(item, mp));
+            assert!(!archival_mutator_set.verify(item, mp).await);
         }
     }

-    #[test]
-    fn archival_set_batch_remove_dynamic_test() {
-        let mut rms = empty_rusty_mutator_set();
+    #[tokio::test]
+    async fn archival_set_batch_remove_dynamic_test() {
+        let mut rms = empty_rusty_mutator_set().await;
         let archival_mutator_set = rms.ams_mut();

         let num_additions = 4 * BATCH_SIZE;
@@ -797,18 +994,19 @@ mod archival_mutator_set_tests {
             let addition_record = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());

-            let membership_proof =
-                archival_mutator_set.prove(item, sender_randomness, receiver_preimage);
+            let membership_proof = archival_mutator_set
+                .prove(item, sender_randomness, receiver_preimage)
+                .await;

             MsMembershipProof::batch_update_from_addition(
                 &mut membership_proofs.iter_mut().collect::<Vec<_>>(),
                 &items,
-                &archival_mutator_set.kernel,
+                &archival_mutator_set.accumulator().await,
                 &addition_record,
             )
             .expect("MS membership update must work");

-            archival_mutator_set.add(&addition_record);
+            archival_mutator_set.add(&addition_record).await;
             membership_proofs.push(membership_proof);
             items.push(item);
@@ -821,19 +1019,21 @@
             let skipped = rng.gen_range(0.0..1.0) < remove_factor;
             skipped_removes.push(skipped);
             if !skipped {
-                removal_records.push(archival_mutator_set.drop(item, mp));
+                removal_records.push(archival_mutator_set.drop(item, mp).await);
             }
         }

         for (mp, &item) in membership_proofs.iter().zip_eq(items.iter()) {
-            assert!(archival_mutator_set.verify(item, mp));
+            assert!(archival_mutator_set.verify(item, mp).await);
         }

-        let commitment_prior_to_removal = archival_mutator_set.hash();
-        archival_mutator_set.batch_remove(
-            removal_records.clone(),
-            &mut membership_proofs.iter_mut().collect::<Vec<_>>(),
-        );
+        let commitment_prior_to_removal = archival_mutator_set.hash().await;
+        archival_mutator_set
+            .batch_remove(
+                removal_records.clone(),
+                &mut membership_proofs.iter_mut().collect::<Vec<_>>(),
+            )
+            .await;

         for ((mp, &item), skipped) in membership_proofs
             .iter()
@@ -841,22 +1041,22 @@ mod archival_mutator_set_tests {
             .zip_eq(skipped_removes.into_iter())
         {
             // If this removal record was not applied, then the membership proof must verify
-            assert_eq!(skipped, archival_mutator_set.verify(item, mp));
+            assert_eq!(skipped, archival_mutator_set.verify(item, mp).await);
         }

         // Verify that removal record indices were applied. If not, below function call will crash.
         for removal_record in removal_records.iter() {
-            archival_mutator_set.revert_remove(removal_record);
+            archival_mutator_set.revert_remove(removal_record).await;
         }

         // Verify that mutator set before and after removal are the same
-        assert_eq!(commitment_prior_to_removal, archival_mutator_set.hash(), "After reverting the removes, mutator set's commitment must equal the one before elements were removed.");
+        assert_eq!(commitment_prior_to_removal, archival_mutator_set.hash().await, "After reverting the removes, mutator set's commitment must equal the one before elements were removed.");
     }
 }

-    fn prepare_seeded_prng_addition<
-        MmrStorage: StorageVec<Digest>,
-        ChunkStorage: StorageVec<Chunk>,
+    async fn prepare_seeded_prng_addition<
+        MmrStorage: StorageVec<Digest> + Send + Sync,
+        ChunkStorage: StorageVec<Chunk> + Send + Sync,
     >(
         archival_mutator_set: &mut ArchivalMutatorSet<MmrStorage, ChunkStorage>,
         rng: &mut StdRng,
@@ -865,19 +1065,24 @@ mod archival_mutator_set_tests {
         let sender_randomness: Digest = rng.gen();
         let receiver_preimage: Digest = rng.gen();
         let addition_record = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
-        let membership_proof =
-            archival_mutator_set.prove(item, sender_randomness, receiver_preimage);
+        let membership_proof = archival_mutator_set
+            .prove(item, sender_randomness, receiver_preimage)
+            .await;

         (item, addition_record, membership_proof)
     }

-    fn prepare_random_addition<MmrStorage: StorageVec<Digest>, ChunkStorage: StorageVec<Chunk>>(
+    async fn prepare_random_addition<
+        MmrStorage: StorageVec<Digest> + Send + Sync,
+        ChunkStorage: StorageVec<Chunk> + Send + Sync,
+    >(
         archival_mutator_set: &mut ArchivalMutatorSet<MmrStorage, ChunkStorage>,
     ) -> (Digest, AdditionRecord, MsMembershipProof) {
         let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
         let addition_record = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
-        let membership_proof =
-            archival_mutator_set.prove(item, sender_randomness, receiver_preimage);
+        let membership_proof = archival_mutator_set
+            .prove(item, sender_randomness, receiver_preimage)
+            .await;

         (item, addition_record, membership_proof)
     }
diff --git a/src/util_types/mutator_set/chunk_dictionary.rs b/src/util_types/mutator_set/chunk_dictionary.rs
index ac6705aa..9e0ebf31 100644
--- a/src/util_types/mutator_set/chunk_dictionary.rs
+++ b/src/util_types/mutator_set/chunk_dictionary.rs
@@ -119,16 +119,16 @@ mod chunk_dict_tests {
     use crate::util_types::mutator_set::shared::CHUNK_SIZE;
     use crate::util_types::test_shared::mutator_set::random_chunk_dictionary;

+    use super::super::archival_mmr::mmr_test::mock;
     use tasm_lib::twenty_first::util_types::algebraic_hasher::AlgebraicHasher;
     use twenty_first::shared_math::other::random_elements;
     use twenty_first::shared_math::tip5::{Digest, Tip5};
-    use twenty_first::test_shared::mmr::get_rustyleveldb_ammr_from_digests;
     use twenty_first::util_types::mmr::mmr_membership_proof::MmrMembershipProof;

     use super::*;

-    #[test]
-    fn hash_test() {
+    #[tokio::test]
+    async fn hash_test() {
         type H = Tip5;

         let chunkdict0 = ChunkDictionary::default();
@@ -138,10 +138,10 @@ mod chunk_dict_tests {
         // Insert elements
         let num_leaves = 3;
         let leaf_hashes: Vec<Digest> = random_elements(num_leaves);
-        let archival_mmr = get_rustyleveldb_ammr_from_digests(leaf_hashes);
+        let archival_mmr = mock::get_ammr_from_digests::<H>(leaf_hashes).await;

         let key1: u64 = 898989;
-        let mp1: MmrMembershipProof<H> = archival_mmr.prove_membership(1).0;
+        let mp1: MmrMembershipProof<H> = archival_mmr.prove_membership_async(1).await;
         let chunk1: Chunk = {
             Chunk {
                 relative_indices: (0..CHUNK_SIZE).collect(),
@@ -153,7 +153,7 @@ mod chunk_dict_tests {
         // Insert two more elements and verify that the hash is deterministic which implies that the
         // elements in the preimage are sorted deterministically.
         let key2: u64 = 8989;
-        let mp2: MmrMembershipProof<H> = archival_mmr.prove_membership(2).0;
+        let mp2: MmrMembershipProof<H> = archival_mmr.prove_membership_async(2).await;
         let mut chunk2 = Chunk::empty_chunk();
         chunk2.insert(CHUNK_SIZE / 2 + 1);
         let value2 = (mp2, chunk2);
@@ -199,8 +199,8 @@ mod chunk_dict_tests {
         assert_ne!(Hash::hash(&chunkdict3), Hash::hash(&chunkdict3_switched));
     }

-    #[test]
-    fn serialization_test() {
+    #[tokio::test]
+    async fn serialization_test() {
         // TODO: You could argue that this test doesn't belong here, as it tests the behavior of
         // an imported library. I included it here, though, because the setup seems a bit clumsy
         // to me so far.
@@ -214,8 +214,8 @@ mod chunk_dict_tests {
         // Build a non-empty chunk dict and verify that it still works
         let key: u64 = 898989;
         let leaf_hashes: Vec<Digest> = random_elements(3);
-        let archival_mmr = get_rustyleveldb_ammr_from_digests(leaf_hashes);
-        let mp: MmrMembershipProof<H> = archival_mmr.prove_membership(1).0;
+        let archival_mmr = mock::get_ammr_from_digests::<H>(leaf_hashes).await;
+        let mp: MmrMembershipProof<H> = archival_mmr.prove_membership_async(1).await;
         let chunk = Chunk {
             relative_indices: (0..CHUNK_SIZE).collect(),
         };
diff --git a/src/util_types/mutator_set/ms_membership_proof.rs b/src/util_types/mutator_set/ms_membership_proof.rs
index fd441a5e..f998351a 100644
--- a/src/util_types/mutator_set/ms_membership_proof.rs
+++ b/src/util_types/mutator_set/ms_membership_proof.rs
@@ -25,8 +25,8 @@ use twenty_first::util_types::mmr::mmr_trait::Mmr;

 use super::addition_record::AdditionRecord;
 use super::chunk_dictionary::{pseudorandom_chunk_dictionary, ChunkDictionary};
+use super::get_swbf_indices;
 use super::mutator_set_accumulator::MutatorSetAccumulator;
-use super::mutator_set_kernel::{get_swbf_indices, MutatorSetKernel};
 use super::removal_record::AbsoluteIndexSet;
 use super::removal_record::RemovalRecord;
 use super::shared::{
@@ -71,10 +71,10 @@ impl MsMembershipProof {
     /// Update a list of membership proofs in anticipation of an addition. If successful,
     /// return (wrapped in an Ok) a vector of all indices of updated membership proofs.
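+    ///
+    /// A minimal usage sketch (illustrative only, not part of the original
+    /// sources; it assumes `mps`, `items`, `msa` and `addition_record` are set
+    /// up the way the tests further down set them up):
+    ///
+    /// ```ignore
+    /// let updated_indices = MsMembershipProof::batch_update_from_addition(
+    ///     &mut mps.iter_mut().collect::<Vec<_>>(),
+    ///     &items,
+    ///     &msa,              // the MutatorSetAccumulator *before* the addition
+    ///     &addition_record,  // the record that is about to be applied
+    /// )
+    /// .expect("MS membership update must work");
+    /// ```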
-    pub fn batch_update_from_addition<MMR: Mmr<Hash>>(
+    pub fn batch_update_from_addition(
         membership_proofs: &mut [&mut Self],
         own_items: &[Digest],
-        mutator_set: &MutatorSetKernel<MMR>,
+        mutator_set: &MutatorSetAccumulator,
         addition_record: &AdditionRecord,
     ) -> Result<Vec<usize>, Box<dyn Error>> {
         assert!(
@@ -105,7 +105,7 @@
         );

         // if window does not slide, we are done
-        if !MutatorSetKernel::<MMR>::window_slides(new_item_index) {
+        if !MutatorSetAccumulator::window_slides(new_item_index) {
             return Ok(indices_for_updated_mps);
         }

@@ -247,23 +247,23 @@
         mutator_set: &MutatorSetAccumulator,
         addition_record: &AdditionRecord,
     ) -> Result<bool, Box<dyn Error>> {
-        assert!(self.auth_path_aocl.leaf_index < mutator_set.kernel.aocl.count_leaves());
-        let new_item_index = mutator_set.kernel.aocl.count_leaves();
+        assert!(self.auth_path_aocl.leaf_index < mutator_set.aocl.count_leaves());
+        let new_item_index = mutator_set.aocl.count_leaves();

         // Update AOCL MMR membership proof
         let aocl_mp_updated = self.auth_path_aocl.update_from_append(
-            mutator_set.kernel.aocl.count_leaves(),
+            mutator_set.aocl.count_leaves(),
             addition_record.canonical_commitment,
-            &mutator_set.kernel.aocl.get_peaks(),
+            &mutator_set.aocl.get_peaks(),
         );

         // if window does not slide, we are done
-        if !MutatorSetKernel::<MmrAccumulator<Hash>>::window_slides(new_item_index) {
+        if !MutatorSetAccumulator::window_slides(new_item_index) {
             return Ok(aocl_mp_updated);
         }

         // window does slide
-        let new_chunk = mutator_set.kernel.swbf_active.slid_chunk();
+        let new_chunk = mutator_set.swbf_active.slid_chunk();
         let new_chunk_digest: Digest = Hash::hash(&new_chunk);

         // Get indices by recalculating them. (We do not cache indices any more.)
@@ -285,7 +285,7 @@
         // a whole archival MMR for this operation, as the archival MMR can be in the
         // size of gigabytes, whereas the MMR accumulator should be in the size of
         // kilobytes.
-        let mut mmra: MmrAccumulator<Hash> = mutator_set.kernel.swbf_inactive.to_accumulator();
+        let mut mmra: MmrAccumulator<Hash> = mutator_set.swbf_inactive.to_accumulator();
         let new_auth_path: mmr::mmr_membership_proof::MmrMembershipProof<Hash> =
             mmra.append(new_chunk_digest);
@@ -308,9 +308,9 @@
             Some((m, _chnk)) => m,
         };
         let swbf_chunk_dict_updated_local: bool = mp.update_from_append(
-            mutator_set.kernel.swbf_inactive.count_leaves(),
+            mutator_set.swbf_inactive.count_leaves(),
             new_chunk_digest,
-            &mutator_set.kernel.swbf_inactive.get_peaks(),
+            &mutator_set.swbf_inactive.get_peaks(),
         );
         swbf_chunk_dictionary_updated =
             swbf_chunk_dictionary_updated || swbf_chunk_dict_updated_local;
@@ -351,7 +351,7 @@
         previous_mutator_set: &MutatorSetAccumulator,
     ) {
         // calculate AOCL MMR MP length
-        let previous_leaf_count = previous_mutator_set.kernel.aocl.count_leaves();
+        let previous_leaf_count = previous_mutator_set.aocl.count_leaves();
         assert!(
             previous_leaf_count > self.auth_path_aocl.leaf_index,
             "Cannot revert a membership proof for an item to back its state before the item was added to the mutator set."
@@ -365,7 +365,7 @@ impl MsMembershipProof {
         }

         // remove chunks from unslid windows
-        let swbfi_leaf_count = previous_mutator_set.kernel.swbf_inactive.count_leaves();
+        let swbfi_leaf_count = previous_mutator_set.swbf_inactive.count_leaves();
         self.target_chunks
             .dictionary
             .retain(|k, _v| *k < swbfi_leaf_count);
@@ -546,7 +546,7 @@ pub fn pseudorandom_mmr_membership_proof(
 mod ms_proof_tests {

     use crate::util_types::mutator_set::chunk::Chunk;
-    use crate::util_types::mutator_set::mutator_set_trait::{commit, MutatorSet};
+    use crate::util_types::mutator_set::commit;
     use crate::util_types::test_shared::mutator_set::{
         empty_rusty_mutator_set, make_item_and_randomnesses, random_mutator_set_membership_proof,
     };
@@ -633,9 +633,7 @@ mod ms_proof_tests {
         for _ in 0..10 {
             let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
-            let mp = accumulator
-                .kernel
-                .prove(item, sender_randomness, receiver_preimage);
+            let mp = accumulator.prove(item, sender_randomness, receiver_preimage);
             let json: String = serde_json::to_string(&mp).unwrap();
             let mp_again = serde_json::from_str::<MsMembershipProof>(&json).unwrap();
@@ -645,8 +643,8 @@ mod ms_proof_tests {
         }
     }

-    #[test]
-    fn revert_update_from_remove_test() {
+    #[tokio::test]
+    async fn revert_update_from_remove_test() {
         let n = 100;
         let mut rng = thread_rng();
@@ -655,7 +653,7 @@
         let mut own_item = None;

         // set up mutator set
-        let mut rms = empty_rusty_mutator_set();
+        let mut rms = empty_rusty_mutator_set().await;
         let archival_mutator_set = rms.ams_mut();

         let mut membership_proofs: Vec<(Digest, MsMembershipProof)> = vec![];
@@ -667,12 +665,17 @@
             let addition_record = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
             for (oi, mp) in membership_proofs.iter_mut() {
-                mp.update_from_addition(*oi, &archival_mutator_set.accumulator(), &addition_record)
-                    .expect("Could not update membership proof from addition.");
+                mp.update_from_addition(
+                    *oi,
+                    &archival_mutator_set.accumulator().await,
+                    &addition_record,
+                )
+                .expect("Could not update membership proof from addition.");
             }

-            let membership_proof =
-                archival_mutator_set.prove(item, sender_randomness, receiver_preimage);
+            let membership_proof = archival_mutator_set
+                .prove(item, sender_randomness, receiver_preimage)
+                .await;
             if i == own_index {
                 own_membership_proof = Some(membership_proof);
                 own_item = Some(item);
@@ -684,31 +687,33 @@
                     .unwrap()
                     .update_from_addition(
                         own_item.unwrap(),
-                        &archival_mutator_set.accumulator(),
+                        &archival_mutator_set.accumulator().await,
                         &addition_record,
                     )
                     .expect("Could not update membership proof from addition record.");
             }
         }

-            archival_mutator_set.add(&addition_record);
+            archival_mutator_set.add(&addition_record).await;
         }

         // assert that own mp is valid
         assert!(
-            archival_mutator_set.verify(own_item.unwrap(), own_membership_proof.as_ref().unwrap())
+            archival_mutator_set
+                .verify(own_item.unwrap(), own_membership_proof.as_ref().unwrap())
+                .await
         );

         // Assert that all other mps are valid
         for (itm, mp) in membership_proofs.iter() {
-            assert!(archival_mutator_set.verify(*itm, mp));
+            assert!(archival_mutator_set.verify(*itm, mp).await);
         }

         // generate some removal records
         let mut removal_records = vec![];
         for (item, membership_proof) in membership_proofs.into_iter() {
             if rng.next_u32() % 2 == 1 {
-                let removal_record = archival_mutator_set.drop(item, &membership_proof);
+                let removal_record = archival_mutator_set.drop(item, &membership_proof).await;
                 removal_records.push(removal_record);
             }
         }
@@ -731,7 +736,7 @@ mod ms_proof_tests {
                 .update_from_remove(applied_removal_record)
                 .expect("Could not update membership proof from removal record");

-            archival_mutator_set.remove(applied_removal_record);
+            archival_mutator_set.remove(applied_removal_record).await;

             if i + 1 == cutoff_point {
                 membership_proof_snapshot = Some(own_membership_proof.as_ref().unwrap().clone());
@@ -740,7 +745,9 @@ mod ms_proof_tests {

         // assert valid
         assert!(
-            archival_mutator_set.verify(own_item.unwrap(), own_membership_proof.as_ref().unwrap())
+            archival_mutator_set
+                .verify(own_item.unwrap(), own_membership_proof.as_ref().unwrap())
+                .await
         );

         // revert some removal records
@@ -753,7 +760,9 @@ mod ms_proof_tests {
                 .revert_update_from_remove(revert_removal_record)
                 .expect("Could not revert update from removal record.");

-            archival_mutator_set.revert_remove(revert_removal_record);
+            archival_mutator_set
+                .revert_remove(revert_removal_record)
+                .await;

             // keep other removal records up-to-date?
             // - nah, we don't need them for anything anymore
@@ -761,7 +770,9 @@ mod ms_proof_tests {

         // assert valid
         assert!(
-            archival_mutator_set.verify(own_item.unwrap(), own_membership_proof.as_ref().unwrap())
+            archival_mutator_set
+                .verify(own_item.unwrap(), own_membership_proof.as_ref().unwrap())
+                .await
         );

         // assert same as snapshot before application-and-reversion
@@ -771,9 +782,9 @@ mod ms_proof_tests {
         );
     }

-    #[test]
-    fn revert_update_single_remove_test() {
-        let mut rms = empty_rusty_mutator_set();
+    #[tokio::test]
+    async fn revert_update_single_remove_test() {
+        let mut rms = empty_rusty_mutator_set().await;
         let ams = rms.ams_mut();
         let mut mps = vec![];
         let mut items = vec![];
@@ -787,26 +798,26 @@ mod ms_proof_tests {
             MsMembershipProof::batch_update_from_addition(
                 &mut mps.iter_mut().collect_vec(),
                 &items,
-                &ams.accumulator().kernel,
+                &ams.accumulator().await,
                 &addition_record,
             )
             .unwrap();
-            mps.push(ams.prove(item, sender_randomness, receiver_preimage));
+            mps.push(ams.prove(item, sender_randomness, receiver_preimage).await);
             items.push(item);
-            ams.add(&addition_record);
+            ams.add(&addition_record).await;
             addition_records.push(addition_record);
         }

         // Verify that all MPs are valid
         for i in 0..ms_size {
-            assert!(ams.verify(items[i], &mps[i]));
+            assert!(ams.verify(items[i], &mps[i]).await);
         }

         // Remove all `ms_size` elements from the MS
         let mut removal_records = vec![];
         for i in 0..ms_size {
-            let removal_record = ams.drop(items[i], &mps[i]);
-            ams.remove(&removal_record);
+            let removal_record = ams.drop(items[i], &mps[i]).await;
+            ams.remove(&removal_record).await;
             MsMembershipProof::batch_update_from_remove(
                 &mut mps.iter_mut().collect_vec(),
                 &removal_record,
@@ -817,43 +828,43 @@ mod ms_proof_tests {
             // Verify that the rest of the MPs are still valid
             for j in 0..ms_size {
                 if j > i {
-                    assert!(ams.verify(items[j], &mps[j]));
+                    assert!(ams.verify(items[j], &mps[j]).await);
                 } else {
-                    assert!(!ams.verify(items[j], &mps[j]));
+                    assert!(!ams.verify(items[j], &mps[j]).await);
                 }
             }
         }

         // Verify that all MPs are invalid since their items were removed
         for i in 0..ms_size {
-            assert!(!ams.verify(items[i], &mps[i]));
+            assert!(!ams.verify(items[i], &mps[i]).await);
         }

         // Revert all removals in opposite order and verify that the MPs become valid again
         for i in (0..ms_size).rev() {
-            ams.revert_remove(&removal_records[i]);
+            ams.revert_remove(&removal_records[i]).await;
             for mp in mps.iter_mut().take(ms_size) {
                 mp.revert_update_from_remove(&removal_records[i]).unwrap();
             }
             for j in 0..ms_size {
                 if j < i {
-                    assert!(!ams.verify(items[j], &mps[j]));
+                    assert!(!ams.verify(items[j], &mps[j]).await);
                 } else {
-                    assert!(ams.verify(items[j], &mps[j]));
+                    assert!(ams.verify(items[j], &mps[j]).await);
                 }
             }
         }

         // Verify all MPs after reverting all removals
         for i in 0..ms_size {
-            ams.verify(items[i], &mps[i]);
+            ams.verify(items[i], &mps[i]).await;
         }
     }

-    #[test]
-    fn revert_update_single_addition_test() {
+    #[tokio::test]
+    async fn revert_update_single_addition_test() {
         for j in 2..30 {
-            let mut rms = empty_rusty_mutator_set();
+            let mut rms = empty_rusty_mutator_set().await;
             let ams = rms.ams_mut();

             // Add `j` items to MSA
@@ -869,29 +880,29 @@ mod ms_proof_tests {
                 MsMembershipProof::batch_update_from_addition(
                     &mut mps.iter_mut().collect_vec(),
                     &items,
-                    &ams.accumulator().kernel,
+                    &ams.accumulator().await,
                     &addition_record,
                 )
                 .unwrap();
-                mps.push(ams.prove(item, sender_randomness, receiver_preimage));
+                mps.push(ams.prove(item, sender_randomness, receiver_preimage).await);
                 items.push(item);
-                ams.add(&addition_record);
+                ams.add(&addition_record).await;
                 addition_records.push(addition_record);
             }

             // Revert all adds but the first one, and keep the 1st MP updated
             for i in (1..j).rev() {
-                ams.revert_add(&addition_records[i]);
-                mps[0].revert_update_from_batch_addition(&ams.accumulator());
+                ams.revert_add(&addition_records[i]).await;
+                mps[0].revert_update_from_batch_addition(&ams.accumulator().await);
                 assert!(
-                    ams.verify(items[0], &mps[0]),
+                    ams.verify(items[0], &mps[0]).await,
                     "MP should be valid after reversion"
                 );
                 if i != 1 {
                     // We also check the 2nd MP for good measure, as long as its item is still in the MS
-                    mps[1].revert_update_from_batch_addition(&ams.accumulator());
+                    mps[1].revert_update_from_batch_addition(&ams.accumulator().await);
                     assert!(
-                        ams.verify(items[1], &mps[1]),
+                        ams.verify(items[1], &mps[1]).await,
                         "MP should be valid after reversion"
                     );
                 }
@@ -899,9 +910,9 @@ mod ms_proof_tests {
         }
     }

-    #[test]
-    fn revert_update_from_addition_batches_test() {
-        let mut msa: MutatorSetAccumulator = MutatorSetAccumulator::new();
+    #[tokio::test]
+    async fn revert_update_from_addition_batches_test() {
+        let mut msa: MutatorSetAccumulator = MutatorSetAccumulator::default();

         let mut rng = thread_rng();
         for _ in 0..10 {
@@ -978,8 +989,8 @@ mod ms_proof_tests {
         }
     }

-    #[test]
-    fn revert_update_from_addition_test() {
+    #[tokio::test]
+    async fn revert_update_from_addition_test() {
         let mut rng = thread_rng();
         let n = rng.next_u32() as usize % 100 + 1;
         // let n = 55;
@@ -990,7 +1001,7 @@ mod ms_proof_tests {
         let mut own_item = None;

         // set up mutator set
-        let mut rms = empty_rusty_mutator_set();
+        let mut rms = empty_rusty_mutator_set().await;
         let archival_mutator_set = rms.ams_mut();

         // add items
@@ -1002,8 +1013,9 @@ mod ms_proof_tests {
             let addition_record = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
             addition_records.push(addition_record);
-            let membership_proof =
-                archival_mutator_set.prove(item, sender_randomness, receiver_preimage);
+            let membership_proof = archival_mutator_set
+                .prove(item, sender_randomness, receiver_preimage)
+                .await;
             match i.cmp(&own_index) {
                 std::cmp::Ordering::Less => {}
                 std::cmp::Ordering::Equal => {
                     own_membership_proof = Some(membership_proof);
@@ -1011,35 +1023,41 @@ mod ms_proof_tests {
                     own_item = Some(item);
                 }
                 std::cmp::Ordering::Greater => {
-                    assert!(archival_mutator_set
-                        .verify(own_item.unwrap(), own_membership_proof.as_ref().unwrap()));
+                    assert!(
+                        archival_mutator_set
+                            .verify(own_item.unwrap(), own_membership_proof.as_ref().unwrap())
+                            .await
+                    );
                     assert!(archival_mutator_set
                         .accumulator()
+                        .await
                         .verify(own_item.unwrap(), own_membership_proof.as_ref().unwrap()));
                     own_membership_proof
                         .as_mut()
                         .unwrap()
                         .update_from_addition(
                             own_item.unwrap(),
-                            &archival_mutator_set.accumulator(),
+                            &archival_mutator_set.accumulator().await,
                             &addition_record,
                         )
                         .expect("Could not update membership proof from addition record.");
                 }
             }

-            let mutator_set_before = archival_mutator_set.accumulator();
-            archival_mutator_set.add(&addition_record);
+            let mutator_set_before = archival_mutator_set.accumulator().await;
+            archival_mutator_set.add(&addition_record).await;

             if i > own_index {
                 let own_item = own_item.as_ref().unwrap().to_owned();
-                assert!(archival_mutator_set
-                    .kernel
-                    .verify(own_item, own_membership_proof.as_ref().unwrap(),));
+                assert!(
+                    archival_mutator_set
+                        .verify(own_item, own_membership_proof.as_ref().unwrap(),)
+                        .await
+                );

                 let mut memproof = own_membership_proof.as_ref().unwrap().clone();
-                assert!(archival_mutator_set.kernel.verify(own_item, &memproof,));
+                assert!(archival_mutator_set.verify(own_item, &memproof,).await);

                 memproof.revert_update_from_batch_addition(&mutator_set_before);

@@ -1051,19 +1069,22 @@ mod ms_proof_tests {
         // revert additions
         let (_petrified, revertible) = addition_records.split_at(own_index + 1);
         for addition_record in revertible.iter().rev() {
-            archival_mutator_set.revert_add(addition_record);
+            archival_mutator_set.revert_add(addition_record).await;
             own_membership_proof
                 .as_mut()
                 .unwrap()
-                .revert_update_from_batch_addition(&archival_mutator_set.accumulator());
+                .revert_update_from_batch_addition(&archival_mutator_set.accumulator().await);

-            assert!(archival_mutator_set
-                .verify(own_item.unwrap(), own_membership_proof.as_ref().unwrap()));
+            assert!(
+                archival_mutator_set
+                    .verify(own_item.unwrap(), own_membership_proof.as_ref().unwrap())
+                    .await
+            );
         }
     }

-    #[test]
-    fn revert_updates_mixed_test() {
+    #[tokio::test]
+    async fn revert_updates_mixed_test() {
         let mut rng_seeder = thread_rng();
         // let seed_integer = rng.next_u32();
         let error_tuple: (usize, u32) = (
@@ -1082,7 +1103,7 @@ mod ms_proof_tests {

         let mut rng = StdRng::from_seed(seed_as_bytes);

-        let mut rms = empty_rusty_mutator_set();
+        let mut rms = empty_rusty_mutator_set().await;
         let archival_mutator_set = rms.ams_mut();

         let own_index = rng.next_u32() as usize % 10;
@@ -1118,21 +1139,22 @@ mod ms_proof_tests {
                     commit(item, sender_randomness, receiver_preimage.hash::<Hash>());

                 // record membership proof
-                let membership_proof =
-                    archival_mutator_set.prove(item, sender_randomness, receiver_preimage);
+                let membership_proof = archival_mutator_set
+                    .prove(item, sender_randomness, receiver_preimage)
+                    .await;

                 // update existing membership proof
                 for (it, mp) in tracked_items_and_membership_proofs.iter_mut() {
                     mp.update_from_addition(
                         *it,
-                        &archival_mutator_set.accumulator(),
+                        &archival_mutator_set.accumulator().await,
                         &addition_record,
                     )
                     .expect("Could not update membership proof from addition.");
                 }

                 // apply record
-                archival_mutator_set.add(&addition_record);
+                archival_mutator_set.add(&addition_record).await;

                 // record record
                 records.push(Either::Left(addition_record));
@@ -1177,7 +1199,7 @@ mod ms_proof_tests {
                 }

                 // generate a removal record
-                let removal_record = archival_mutator_set.drop(item, &membership_proof);
+                let removal_record = archival_mutator_set.drop(item, &membership_proof).await;

                 // update the other membership proofs with the removal record
                 for (_, mp) in tracked_items_and_membership_proofs.iter_mut() {
@@ -1187,13 +1209,13 @@ mod ms_proof_tests {

                 // don't lose track of the removed item
                 assert!(
-                    archival_mutator_set.verify(item, &membership_proof),
+                    archival_mutator_set.verify(item, &membership_proof).await,
                     "track index: {track_index}\nitem index: {index}",
                 );
                 removed_items_and_membership_proofs.push((item, membership_proof.clone(), index));

                 // remove the item from the mutator set
-                archival_mutator_set.remove(&removal_record);
+                archival_mutator_set.remove(&removal_record).await;

                 // record record
                 records.push(Either::Right(removal_record));
@@ -1230,13 +1252,13 @@ mod ms_proof_tests {
                     );
                     for _ in 0..num_reversions {
                         if let Some(Either::Left(addition_record)) = records.pop() {
-                            archival_mutator_set.revert_add(&addition_record);
+                            archival_mutator_set.revert_add(&addition_record).await;
                         }
                         tracked_items_and_membership_proofs.pop();
                     }
                     for (_, mp) in tracked_items_and_membership_proofs.iter_mut() {
                         mp.revert_update_from_batch_addition(
-                            &archival_mutator_set.accumulator(),
+                            &archival_mutator_set.accumulator().await,
                         );
                     }
                 }
@@ -1251,13 +1273,13 @@ mod ms_proof_tests {
                         format!("{records_abbreviation}a");

                     // revert update to mutator set
-                    archival_mutator_set.revert_add(&addition_record);
+                    archival_mutator_set.revert_add(&addition_record).await;
                     tracked_items_and_membership_proofs.pop();

                     for (_, mp) in tracked_items_and_membership_proofs.iter_mut() {
                         mp.revert_update_from_batch_addition(
-                            &archival_mutator_set.accumulator(),
+                            &archival_mutator_set.accumulator().await,
                         );
                     }
                 }
@@ -1269,7 +1291,9 @@ mod ms_proof_tests {
                         format!("{records_abbreviation}r");

                     // revert update to mutator set
-                    archival_mutator_set.revert_remove(&removal_record);
+                    archival_mutator_set
+                        .revert_remove(&removal_record)
+                        .await;

                     // assert valid proofs
                     for (_, mp) in
@@ -1281,8 +1305,11 @@ mod ms_proof_tests {

                     match removed_items_and_membership_proofs.pop() {
                         Some((item, membership_proof, index)) => {
-                            assert!(archival_mutator_set
-                                .verify(item, &membership_proof));
+                            assert!(
+                                archival_mutator_set
+                                    .verify(item, &membership_proof)
+                                    .await
+                            );
                             tracked_items_and_membership_proofs
                                 .insert(index, (item, membership_proof));
                             _report_index = index;
@@ -1310,10 +1337,12 @@ mod ms_proof_tests {
             if i > own_index {
                 assert_eq!(own_item, tracked_items_and_membership_proofs[track_index].0);
                 assert!(
-                    archival_mutator_set.verify(
-                        own_item,
-                        &tracked_items_and_membership_proofs[track_index].1
-                    ),
+                    archival_mutator_set
+                        .verify(
+                            own_item,
+                            &tracked_items_and_membership_proofs[track_index].1
+                        )
+                        .await,
                     "seed: {seed_integer} / n: {n}",
                 );
             }
diff --git a/src/util_types/mutator_set/msa_and_records.rs b/src/util_types/mutator_set/msa_and_records.rs
index 9fdcecd0..2d7ea9fb 100644
--- a/src/util_types/mutator_set/msa_and_records.rs
+++ b/src/util_types/mutator_set/msa_and_records.rs
@@ -1,6 +1,19 @@
 use std::collections::HashMap;

+use super::{
+    active_window::ActiveWindow,
+    chunk::Chunk,
+    chunk_dictionary::ChunkDictionary,
+    get_swbf_indices,
+    mmra_and_membership_proofs::MmraAndMembershipProofs,
+    ms_membership_proof::MsMembershipProof,
+    mutator_set_accumulator::MutatorSetAccumulator,
+    removal_record::{AbsoluteIndexSet, RemovalRecord},
+    shared::{BATCH_SIZE, CHUNK_SIZE},
+};
+use crate::{util_types::mutator_set::commit, Hash};
 use itertools::Itertools;
+use proptest::collection::vec;
 use proptest::{
     arbitrary::Arbitrary,
     strategy::{BoxedStrategy, Strategy},
@@ -14,24 +27,6 @@ use tasm_lib::{
     Digest,
 };

-use crate::{
-    models::blockchain::shared::Hash, util_types::mutator_set::mutator_set_trait::MutatorSet,
-};
-
-use super::{
-    active_window::ActiveWindow,
-    chunk::Chunk,
-    chunk_dictionary::ChunkDictionary,
-    mmra_and_membership_proofs::MmraAndMembershipProofs,
-    ms_membership_proof::MsMembershipProof,
-    mutator_set_accumulator::MutatorSetAccumulator,
-    mutator_set_kernel::{get_swbf_indices, MutatorSetKernel},
-    mutator_set_trait::commit,
-    removal_record::{AbsoluteIndexSet, RemovalRecord},
-    shared::{BATCH_SIZE, CHUNK_SIZE},
-};
-use proptest::collection::vec;
-
 #[derive(Debug, Clone)]
 pub struct MsaAndRecords {
     pub mutator_set_accumulator: MutatorSetAccumulator,
@@ -44,7 +39,7 @@ impl MsaAndRecords {
         let all_removal_records_can_remove = self
             .removal_records
             .iter()
-            .all(|rr| self.mutator_set_accumulator.kernel.can_remove(rr));
+            .all(|rr| self.mutator_set_accumulator.can_remove(rr));
         assert!(
             all_removal_records_can_remove,
             "Some removal records cannot be removed!"
@@ -255,11 +250,9 @@ impl Arbitrary for MsaAndRecords {
                 arb::<ActiveWindow>()
                     .prop_map(move |active_window| {
                         let mutator_set_accumulator = MutatorSetAccumulator {
-                            kernel: MutatorSetKernel {
                                 aocl: aocl_mmra.clone(),
                                 swbf_inactive: swbf_mmra.clone(),
                                 swbf_active: active_window,
-                            },
                         };

                         MsaAndRecords {
diff --git a/src/util_types/mutator_set/mutator_set_accumulator.rs b/src/util_types/mutator_set/mutator_set_accumulator.rs
index 59b06d51..8ce3a6bd 100644
--- a/src/util_types/mutator_set/mutator_set_accumulator.rs
+++ b/src/util_types/mutator_set/mutator_set_accumulator.rs
@@ -1,8 +1,15 @@
+use std::collections::HashMap;
+
 use crate::models::blockchain::shared::Hash;
 use crate::prelude::twenty_first;

 use get_size::GetSize;
+use itertools::Itertools;
+use num_traits::Zero;
 use serde::{Deserialize, Serialize};
+use tasm_lib::twenty_first::shared_math::b_field_element::BFieldElement;
+use tasm_lib::twenty_first::util_types::mmr::mmr_membership_proof::MmrMembershipProof;
+use tasm_lib::DIGEST_LENGTH;
 use twenty_first::shared_math::bfield_codec::BFieldCodec;
 use twenty_first::shared_math::tip5::Digest;
 use twenty_first::util_types::mmr::mmr_trait::Mmr;
@@ -10,76 +17,341 @@ use twenty_first::util_types::{
     algebraic_hasher::AlgebraicHasher, mmr::mmr_accumulator::MmrAccumulator,
 };

+use super::chunk::Chunk;
+use super::chunk_dictionary::ChunkDictionary;
+use super::get_swbf_indices;
+use super::removal_record::AbsoluteIndexSet;
+use super::shared::{indices_to_hash_map, BATCH_SIZE, CHUNK_SIZE};
 use super::{
     active_window::ActiveWindow, addition_record::AdditionRecord,
-    ms_membership_proof::MsMembershipProof, mutator_set_kernel::MutatorSetKernel,
-    mutator_set_trait::MutatorSet, removal_record::RemovalRecord,
+    ms_membership_proof::MsMembershipProof, removal_record::RemovalRecord,
 };

 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, GetSize, BFieldCodec)]
 pub struct MutatorSetAccumulator {
-    pub kernel: MutatorSetKernel<MmrAccumulator<Hash>>,
+    pub aocl: MmrAccumulator<Hash>,
+    pub swbf_inactive: MmrAccumulator<Hash>,
+    pub swbf_active: ActiveWindow,
 }

-impl MutatorSetAccumulator {
-    pub fn new() -> Self {
-        let set_commitment = MutatorSetKernel::<MmrAccumulator<Hash>> {
+impl Default for MutatorSetAccumulator {
+    fn default() -> Self {
+        Self {
             aocl: MmrAccumulator::new(vec![]),
             swbf_inactive: MmrAccumulator::new(vec![]),
-            swbf_active: ActiveWindow::new(),
-        };
+            swbf_active: Default::default(),
+        }
+    }
+}

+impl MutatorSetAccumulator {
+    pub fn new(
+        aocl: &[Digest],
+        aocl_leaf_count: u64,
+        swbf_inactive: &[Digest],
+        swbf_active: &ActiveWindow,
+    ) -> Self {
+        let swbf_inactive_leaf_count = aocl_leaf_count / (BATCH_SIZE as u64);

         Self {
-            kernel: set_commitment,
+            aocl: MmrAccumulator::init(aocl.to_vec(), aocl_leaf_count),
+            swbf_inactive: MmrAccumulator::init(swbf_inactive.to_vec(), swbf_inactive_leaf_count),
+            swbf_active: swbf_active.clone(),
         }
     }
-}

-impl Default for MutatorSetAccumulator {
-    fn default() -> Self {
-        let set_commitment = MutatorSetKernel::<MmrAccumulator<Hash>> {
-            aocl: MmrAccumulator::new(vec![]),
-            swbf_inactive: MmrAccumulator::new(vec![]),
-            swbf_active: ActiveWindow::new(),
-        };
+    /// Helper function. Like `add` but also returns the chunk that
+    /// was added to the inactive SWBF if the window slid (and None
+    /// otherwise) since this is needed by the archival version of
+    /// the mutator set.
+    pub fn add_helper(&mut self, addition_record: &AdditionRecord) -> Option<(u64, Chunk)> {
+        // Notice that `add` cannot return a membership proof since `add` cannot know the
+        // randomness that was used to create the commitment. This randomness can only be known
+        // by the sender and/or receiver of the UTXO. And `add` must be run by all nodes keeping
+        // track of the mutator set.
+
+        // add to list
+        let item_index = self.aocl.count_leaves();
+        self.aocl
+            .append(addition_record.canonical_commitment.to_owned()); // ignore auth path
+
+        if !Self::window_slides(item_index) {
+            return None;
+        }

-        Self {
-            kernel: set_commitment,
+        // if window slides, update filter
+        // First update the inactive part of the SWBF, the SWBF MMR
+        let new_chunk: Chunk = self.swbf_active.slid_chunk();
+        let chunk_digest: Digest = Hash::hash(&new_chunk);
+        let new_chunk_index = self.swbf_inactive.count_leaves();
+        self.swbf_inactive.append(chunk_digest); // ignore auth path
+
+        // Then move window to the right, equivalent to moving values
+        // inside window to the left.
+        self.swbf_active.slide_window();
+
+        // Return the chunk that was added to the inactive part of the SWBF.
+        // This chunk is needed by the Archival mutator set. The Regular
+        // mutator set can ignore it.
+        Some((new_chunk_index, new_chunk))
+    }
+
+    /// Return the batch index for the latest addition to the mutator set
+    pub fn get_batch_index(&self) -> u64 {
+        match self.aocl.count_leaves() {
+            0 => 0,
+            n => (n - 1) / BATCH_SIZE as u64,
         }
     }
+
+    /// Remove a record and return the chunks that have been updated in this process,
+    /// after applying the update. Does not mutate the removal record.
+    pub fn remove_helper(&mut self, removal_record: &RemovalRecord) -> HashMap<u64, Chunk> {
+        let batch_index = self.get_batch_index();
+        let active_window_start = batch_index as u128 * CHUNK_SIZE as u128;
+
+        // insert all indices
+        let mut new_target_chunks: ChunkDictionary = removal_record.target_chunks.clone();
+        let chunkindices_to_indices_dict: HashMap<u64, Vec<u128>> =
+            removal_record.get_chunkidx_to_indices_dict();
+
+        for (chunk_index, indices) in chunkindices_to_indices_dict {
+            if chunk_index >= batch_index {
+                // index is in the active part, so insert it in the active part of the Bloom filter
+                for index in indices {
+                    let relative_index = (index - active_window_start) as u32;
+                    self.swbf_active.insert(relative_index);
+                }
+
+                continue;
+            }
+
+            // If chunk index is not in the active part, insert the index into the relevant chunk
+            let new_target_chunks_clone = new_target_chunks.clone();
+            let relevant_chunk = new_target_chunks
+                .dictionary
+                .get_mut(&chunk_index)
+                .unwrap_or_else(|| {
+                    panic!(
+                        "Can't get chunk index {chunk_index} from removal record dictionary! dictionary: {:?}\nAOCL size: {}\nbatch index: {}\nRemoval record: {:?}",
+                        new_target_chunks_clone.dictionary,
+                        self.aocl.count_leaves(),
+                        batch_index,
+                        removal_record
+                    )
+                });
+            for index in indices {
+                let relative_index = (index % CHUNK_SIZE as u128) as u32;
+                relevant_chunk.1.insert(relative_index);
+            }
+        }
+
+        // update mmr
+        // to do this, we need to keep track of all membership proofs
+        let all_mmr_membership_proofs = new_target_chunks
+            .dictionary
+            .values()
+            .map(|(p, _c)| p.to_owned());
+        let all_leafs = new_target_chunks
+            .dictionary
+            .values()
+            .map(|(_p, chunk)| Hash::hash(chunk));
+        let mutation_data: Vec<(MmrMembershipProof<Hash>, Digest)> =
+            all_mmr_membership_proofs.zip(all_leafs).collect();
+
+        // If we want to update the membership proof with this removal, we
+        // could use the below function.
+        self.swbf_inactive
+            .batch_mutate_leaf_and_update_mps(&mut [], mutation_data);
+
+        new_target_chunks
+            .dictionary
+            .into_iter()
+            .map(|(chunk_index, (_mp, chunk))| (chunk_index, chunk))
+            .collect()
+    }
+
+    /// Check if a removal record can be applied to a mutator set. Returns false if either
+    /// the MMR membership proofs are unsynced, or if all its indices are already set.
+    pub fn can_remove(&self, removal_record: &RemovalRecord) -> bool {
+        let mut have_absent_index = false;
+        if !removal_record.validate(self) {
+            return false;
+        }
+
+        for inserted_index in removal_record.absolute_indices.to_vec().into_iter() {
+            // determine if inserted index lives in active window
+            let active_window_start =
+                (self.aocl.count_leaves() / BATCH_SIZE as u64) as u128 * CHUNK_SIZE as u128;
+            if inserted_index < active_window_start {
+                let inserted_index_chunkidx = (inserted_index / CHUNK_SIZE as u128) as u64;
+                if let Some((_mmr_mp, chunk)) = removal_record
+                    .target_chunks
+                    .dictionary
+                    .get(&inserted_index_chunkidx)
+                {
+                    let relative_index = (inserted_index % CHUNK_SIZE as u128) as u32;
+                    if !chunk.contains(relative_index) {
+                        have_absent_index = true;
+                        break;
+                    }
+                }
+            } else {
+                let relative_index = (inserted_index - active_window_start) as u32;
+                if !self.swbf_active.contains(relative_index) {
+                    have_absent_index = true;
+                    break;
+                }
+            }
+        }
+
+        have_absent_index
+    }
 }

-impl MutatorSet for MutatorSetAccumulator {
-    fn prove(
-        &mut self,
+impl MutatorSetAccumulator {
+    /// Generates a membership proof that will be valid when the item
+    /// is added to the mutator set.
+    pub fn prove(
+        &self,
         item: Digest,
         sender_randomness: Digest,
         receiver_preimage: Digest,
     ) -> MsMembershipProof {
-        self.kernel
-            .prove(item, sender_randomness, receiver_preimage)
+        // compute commitment
+        let item_commitment = Hash::hash_pair(item, sender_randomness);
+
+        // simulate adding to commitment list
+        let auth_path_aocl = self.aocl.to_accumulator().append(item_commitment);
+        let target_chunks: ChunkDictionary = ChunkDictionary::default();
+
+        // return membership proof
+        MsMembershipProof {
+            sender_randomness: sender_randomness.to_owned(),
+            receiver_preimage: receiver_preimage.to_owned(),
+            auth_path_aocl,
+            target_chunks,
+        }
     }

-    fn verify(&self, item: Digest, membership_proof: &MsMembershipProof) -> bool {
-        self.kernel.verify(item, membership_proof)
+    pub fn verify(&self, item: Digest, membership_proof: &MsMembershipProof) -> bool {
+        // If data index does not exist in AOCL, return false
+        // This also ensures that no "future" indices will be
+        // returned from `get_indices`, so we don't have to check for
+        // future indices in a separate check.
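+        //
+        // Summary of the checks below: membership holds iff (1) the AOCL leaf
+        //     H(H(item, sender_randomness), H(receiver_preimage, 0))
+        // carries a valid MMR authentication path, and (2) at least one of the
+        // item's Bloom filter indices is still unset; a removed item has all of
+        // its indices set, so no absent index can be exhibited for it.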
+        if self.aocl.count_leaves() <= membership_proof.auth_path_aocl.leaf_index {
+            return false;
+        }
+
+        // verify that a commitment to the item lives in the aocl mmr
+        let leaf = Hash::hash_pair(
+            Hash::hash_pair(item, membership_proof.sender_randomness),
+            Hash::hash_pair(
+                membership_proof.receiver_preimage,
+                Digest::new([BFieldElement::zero(); DIGEST_LENGTH]),
+            ),
+        );
+        let (is_aocl_member, _) = membership_proof.auth_path_aocl.verify(
+            &self.aocl.get_peaks(),
+            leaf,
+            self.aocl.count_leaves(),
+        );
+        if !is_aocl_member {
+            return false;
+        }
+
+        // verify that some indices are not present in the swbf
+        let mut has_absent_index = false;
+        let mut entries_in_dictionary = true;
+        let mut all_auth_paths_are_valid = true;
+
+        // prepare parameters of inactive part
+        let current_batch_index: u64 = self.get_batch_index();
+        let window_start = current_batch_index as u128 * CHUNK_SIZE as u128;
+
+        // Get all bloom filter indices
+        let all_indices = AbsoluteIndexSet::new(&get_swbf_indices(
+            item,
+            membership_proof.sender_randomness,
+            membership_proof.receiver_preimage,
+            membership_proof.auth_path_aocl.leaf_index,
+        ));
+
+        let chunkidx_to_indices_dict = indices_to_hash_map(&all_indices.to_array());
+        'outer: for (chunk_index, indices) in chunkidx_to_indices_dict.into_iter() {
+            if chunk_index < current_batch_index {
+                // verify mmr auth path
+                if !membership_proof
+                    .target_chunks
+                    .dictionary
+                    .contains_key(&chunk_index)
+                {
+                    entries_in_dictionary = false;
+                    break 'outer;
+                }
+
+                let mp_and_chunk: &(MmrMembershipProof<Hash>, Chunk) = membership_proof
+                    .target_chunks
+                    .dictionary
+                    .get(&chunk_index)
+                    .unwrap();
+                let (valid_auth_path, _) = mp_and_chunk.0.verify(
+                    &self.swbf_inactive.get_peaks(),
+                    Hash::hash(&mp_and_chunk.1),
+                    self.swbf_inactive.count_leaves(),
+                );
+
+                all_auth_paths_are_valid = all_auth_paths_are_valid && valid_auth_path;
+
+                'inner_inactive: for index in indices {
+                    let index_within_chunk = index % CHUNK_SIZE as u128;
+                    if !mp_and_chunk.1.contains(index_within_chunk as u32) {
+                        has_absent_index = true;
+                        break 'inner_inactive;
+                    }
+                }
+            } else {
+                // indices are in active window
+                'inner_active: for index in indices {
+                    let relative_index = index - window_start;
+                    if !self.swbf_active.contains(relative_index as u32) {
+                        has_absent_index = true;
+                        break 'inner_active;
+                    }
+                }
+            }
+        }
+
+        // return verdict
+        is_aocl_member && entries_in_dictionary && all_auth_paths_are_valid && has_absent_index
     }

-    fn drop(&self, item: Digest, membership_proof: &MsMembershipProof) -> RemovalRecord {
-        self.kernel.drop(item, membership_proof)
+    /// Generates a removal record with which to update the set commitment.
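+    ///
+    /// Sketch of the intended add/drop/remove round trip (illustrative only,
+    /// not from the original sources; `msa` is assumed to be a
+    /// `MutatorSetAccumulator` and the commitment data to be at hand, as in
+    /// the tests below):
+    ///
+    /// ```ignore
+    /// let mp = msa.prove(item, sender_randomness, receiver_preimage);
+    /// msa.add(&commit(item, sender_randomness, receiver_preimage.hash::<Hash>()));
+    /// assert!(msa.verify(item, &mp));
+    ///
+    /// let removal_record = msa.drop(item, &mp);
+    /// msa.remove(&removal_record);
+    /// assert!(!msa.verify(item, &mp));
+    /// ```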
+    pub fn drop(&self, item: Digest, membership_proof: &MsMembershipProof) -> RemovalRecord {
+        let indices: AbsoluteIndexSet = AbsoluteIndexSet::new(&get_swbf_indices(
+            item,
+            membership_proof.sender_randomness,
+            membership_proof.receiver_preimage,
+            membership_proof.auth_path_aocl.leaf_index,
+        ));
+
+        RemovalRecord {
+            absolute_indices: indices,
+            target_chunks: membership_proof.target_chunks.clone(),
+        }
     }

-    fn add(&mut self, addition_record: &AdditionRecord) {
-        self.kernel.add_helper(addition_record);
+    pub fn add(&mut self, addition_record: &AdditionRecord) {
+        self.add_helper(addition_record);
     }

-    fn remove(&mut self, removal_record: &RemovalRecord) {
-        self.kernel.remove_helper(removal_record);
+    pub fn remove(&mut self, removal_record: &RemovalRecord) {
+        self.remove_helper(removal_record);
     }

-    fn hash(&self) -> Digest {
-        let aocl_mmr_bagged = self.kernel.aocl.bag_peaks();
-        let inactive_swbf_bagged = self.kernel.swbf_inactive.bag_peaks();
-        let active_swbf_bagged = Hash::hash(&self.kernel.swbf_active);
+    pub fn hash(&self) -> Digest {
+        let aocl_mmr_bagged = self.aocl.bag_peaks();
+        let inactive_swbf_bagged = self.swbf_inactive.bag_peaks();
+        let active_swbf_bagged = Hash::hash(&self.swbf_active);
         let default = Digest::default();

         Hash::hash_pair(
@@ -88,31 +360,147 @@ impl MutatorSet for MutatorSetAccumulator {
         )
     }

-    fn batch_remove(
+    /// Apply a bunch of removal records. Return a hashmap of
+    /// { chunk index => updated_chunk }.
+    pub fn batch_remove(
         &mut self,
-        removal_records: Vec<RemovalRecord>,
+        mut removal_records: Vec<RemovalRecord>,
         preserved_membership_proofs: &mut [&mut MsMembershipProof],
-    ) {
-        self.kernel
-            .batch_remove(removal_records, preserved_membership_proofs);
+    ) -> HashMap<u64, Chunk> {
+        {
+            let batch_index = self.get_batch_index();
+            let active_window_start = batch_index as u128 * CHUNK_SIZE as u128;
+
+            // Collect all indices that are set by the removal records
+            let all_removal_records_indices: Vec<u128> = removal_records
+                .iter()
+                .map(|x| x.absolute_indices.to_vec())
+                .concat();
+
+            // Loop over all indices from removal records in order to create a mapping
+            // {chunk index => chunk mutation } where "chunk mutation" has the type of
+            // `Chunk` but only represents the values which are set by the removal records
+            // being handled.
+            let mut chunkidx_to_chunk_difference_dict: HashMap<u64, Chunk> = HashMap::new();
+            all_removal_records_indices.iter().for_each(|index| {
+                if *index >= active_window_start {
+                    let relative_index = (index - active_window_start) as u32;
+                    self.swbf_active.insert(relative_index);
+                } else {
+                    chunkidx_to_chunk_difference_dict
+                        .entry((index / CHUNK_SIZE as u128) as u64)
+                        .or_insert_with(Chunk::empty_chunk)
+                        .insert((*index % CHUNK_SIZE as u128) as u32);
+                }
+            });
+
+            // Collect all affected chunks as they look before these removal records are applied
+            // These chunks are part of the removal records, so we fetch them there.
+            let mut mutation_data_preimage: HashMap<u64, (&mut Chunk, MmrMembershipProof<Hash>)> =
+                HashMap::new();
+            for removal_record in removal_records.iter_mut() {
+                for (chunk_index, (mmr_mp, chunk)) in
+                    removal_record.target_chunks.dictionary.iter_mut()
+                {
+                    let chunk_hash = Hash::hash(chunk);
+                    let prev_val =
+                        mutation_data_preimage.insert(*chunk_index, (chunk, mmr_mp.to_owned()));

+                    // Sanity check that all removal records agree on both chunks and MMR membership
+                    // proofs.
+                    if let Some((chnk, mm)) = prev_val {
+                        assert!(mm == *mmr_mp && chunk_hash == Hash::hash(chnk))
+                    }
+                }
+            }
+
+            // Apply the removal records: the new chunk is obtained by adding the chunk difference
+            for (chunk_index, (chunk, _)) in mutation_data_preimage.iter_mut() {
+                **chunk = chunk
+                    .clone()
+                    .combine(chunkidx_to_chunk_difference_dict[chunk_index].clone())
+                    .clone();
+            }
+
+            // Set the chunk values in the membership proofs that we want to preserve to the
+            // newly calculated chunk values.
+            // This is done by looping over all membership proofs and checking if they contain
+            // any of the chunks that are affected by the removal records.
+            for mp in preserved_membership_proofs.iter_mut() {
+                for (chunk_index, (_, chunk)) in mp.target_chunks.dictionary.iter_mut() {
+                    if mutation_data_preimage.contains_key(chunk_index) {
+                        *chunk = mutation_data_preimage[chunk_index].0.to_owned();
+                    }
+                }
+            }
+
+            // Calculate the digests of the affected leafs in the inactive part of the sliding-window
+            // Bloom filter such that we can apply a batch-update operation to the MMR through which
+            // this part of the Bloom filter is represented.
+            let swbf_inactive_mutation_data: Vec<(MmrMembershipProof<Hash>, Digest)> =
+                mutation_data_preimage
+                    .into_values()
+                    .map(|x| (x.1, Hash::hash(x.0)))
+                    .collect();
+
+            // Create a vector of pointers to the MMR-membership part of the mutator set membership
+            // proofs that we want to preserve. This is used as input to a batch-call to the
+            // underlying MMR.
+            let mut preserved_mmr_membership_proofs: Vec<&mut MmrMembershipProof<Hash>> =
+                preserved_membership_proofs
+                    .iter_mut()
+                    .flat_map(|x| {
+                        x.target_chunks
+                            .dictionary
+                            .iter_mut()
+                            .map(|y| &mut y.1 .0)
+                            .collect::<Vec<_>>()
+                    })
+                    .collect();
+
+            // Apply the batch-update to the inactive part of the sliding window Bloom filter.
+            // This updates both the inactive part of the SWBF and the MMR membership proofs
+            self.swbf_inactive.batch_mutate_leaf_and_update_mps(
+                &mut preserved_mmr_membership_proofs,
+                swbf_inactive_mutation_data,
+            );
+
+            chunkidx_to_chunk_difference_dict
+        }
+    }
+
+    /// Determine if the window slides before absorbing an item,
+    /// given the index of the to-be-added item.
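+    ///
+    /// Illustrative check of the slide schedule (not from the original
+    /// sources; `BATCH_SIZE` is the crate constant):
+    ///
+    /// ```ignore
+    /// assert!(!MutatorSetAccumulator::window_slides(0));
+    /// assert!(!MutatorSetAccumulator::window_slides(1));
+    /// assert!(MutatorSetAccumulator::window_slides(BATCH_SIZE as u64));
+    /// assert!(MutatorSetAccumulator::window_slides(3 * BATCH_SIZE as u64));
+    /// ```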
+    pub fn window_slides(added_index: u64) -> bool {
+        added_index != 0 && added_index % BATCH_SIZE as u64 == 0
+
+        // example cases:
+        // - index == 0 we don't care about
+        // - index == 1 does not generate a slide
+        // - index == n * BATCH_SIZE generates a slide for any n
+    }
+
+    pub fn window_slides_back(removed_index: u64) -> bool {
+        Self::window_slides(removed_index)
     }
 }

 #[cfg(test)]
 mod ms_accumulator_tests {
     use crate::util_types::{
-        mutator_set::shared::{BATCH_SIZE, CHUNK_SIZE, NUM_TRIALS, WINDOW_SIZE},
+        mutator_set::{
+            commit,
+            shared::{BATCH_SIZE, CHUNK_SIZE, NUM_TRIALS, WINDOW_SIZE},
+        },
         test_shared::mutator_set::*,
     };
     use itertools::{izip, Itertools};
     use rand::{thread_rng, Rng};

-    use crate::util_types::mutator_set::mutator_set_trait::commit;
-
     use super::*;

-    #[test]
-    fn mutator_set_batch_remove_accumulator_test() {
+    #[tokio::test]
+    async fn mutator_set_batch_remove_accumulator_test() {
         // Test the batch-remove function for mutator set accumulator
         let mut accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default();
         let mut membership_proofs: Vec<MsMembershipProof> = vec![];
@@ -129,7 +517,7 @@ mod ms_accumulator_tests {
             MsMembershipProof::batch_update_from_addition(
                 &mut membership_proofs.iter_mut().collect::<Vec<_>>(),
                 &items,
-                &accumulator.kernel,
+                &accumulator,
                 &addition_record,
             )
             .expect("MS membership update must work");
@@ -173,8 +561,8 @@ mod ms_accumulator_tests {
         }
     }

-    #[test]
-    fn mutator_set_accumulator_pbt() {
+    #[tokio::test]
+    async fn mutator_set_accumulator_pbt() {
         // This test verifies that items can be added and removed from the mutator set
         // without assuming anything about the order of the adding and removal. It also
         // verifies that the membership proofs handled through a mutator set accumulator
@@ -185,9 +573,9 @@ mod ms_accumulator_tests {
         // lot of code duplication that is avoided by doing that.
         let mut accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default();
-        let mut rms_after = empty_rusty_mutator_set();
+        let mut rms_after = empty_rusty_mutator_set().await;
         let archival_after_remove = rms_after.ams_mut();
-        let mut rms_before = empty_rusty_mutator_set();
+        let mut rms_before = empty_rusty_mutator_set().await;
         let archival_before_remove = rms_before.ams_mut();
         let number_of_interactions = 100;
         let mut rng = rand::thread_rng();
@@ -207,7 +595,7 @@ mod ms_accumulator_tests {
                 let new_commitment = accumulator.hash();
                 assert_eq!(
                     new_commitment,
-                    archival_after_remove.hash(),
+                    archival_after_remove.hash().await,
                     "Commitment to archival/accumulator MS must agree"
                 );
                 match last_ms_commitment {
@@ -234,7 +622,7 @@ mod ms_accumulator_tests {
                 let update_result = MsMembershipProof::batch_update_from_addition(
                     &mut membership_proofs_batch.iter_mut().collect::<Vec<_>>(),
                     &items,
-                    &accumulator.kernel,
+                    &accumulator,
                     &addition_record,
                 );
                 assert!(update_result.is_ok(), "Batch mutation must return OK");
@@ -248,8 +636,8 @@ mod ms_accumulator_tests {
                 }

                 accumulator.add(&addition_record);
-                archival_after_remove.add(&addition_record);
-                archival_before_remove.add(&addition_record);
+                archival_after_remove.add(&addition_record).await;
+                archival_before_remove.add(&addition_record).await;

                 let updated_mp_indices = update_result.unwrap();
                 println!("{}: Inserted", i);
@@ -297,7 +685,7 @@ mod ms_accumulator_tests {
                 // generate removal record
                 let removal_record: RemovalRecord = accumulator.drop(removal_item, &removal_mp);
-                assert!(removal_record.validate(&accumulator.kernel));
+                assert!(removal_record.validate(&accumulator));

                 // update membership proofs
                 // Update membership proofs in batch
@@ -322,14 +710,18 @@ mod ms_accumulator_tests {
                 assert!(accumulator.verify(removal_item, &removal_mp));
                 let removal_record_copy = removal_record.clone();
                 accumulator.remove(&removal_record);
-                archival_after_remove.remove(&removal_record);
+                archival_after_remove.remove(&removal_record).await;

                 // Verify that removal record's indices are all set
                 for removed_index in removal_record.absolute_indices.to_vec() {
-                    assert!(archival_after_remove.bloom_filter_contains(removed_index));
+                    assert!(
+                        archival_after_remove
+                            .bloom_filter_contains(removed_index)
+                            .await
+                    );
                 }

-                archival_before_remove.remove(&removal_record_copy);
+                archival_before_remove.remove(&removal_record_copy).await;
                 assert!(!accumulator.verify(removal_item, &removal_mp));

                 // Verify that the sequential `update_from_remove` return value is correct
@@ -395,6 +787,7 @@ mod ms_accumulator_tests {
                     receiver_preimage,
                     mp_batch.auth_path_aocl.leaf_index,
                 )
+                .await
                 .unwrap();
                 assert_eq!(arch_mp, mp_batch.to_owned());

@@ -425,7 +818,7 @@ mod ms_accumulator_tests {
             "profiling Mutator Set (w, b, s, k) = ({}, {}, {}, {}) ...",
             WINDOW_SIZE, BATCH_SIZE, CHUNK_SIZE, NUM_TRIALS
         );
-        let mut msa = MutatorSetAccumulator::new();
+        let mut msa = MutatorSetAccumulator::default();
         let mut items_and_membership_proofs: Vec<(Digest, MsMembershipProof)> = vec![];
         let target_set_size = 100;
         let num_iterations = 10000;
diff --git a/src/util_types/mutator_set/mutator_set_kernel.rs b/src/util_types/mutator_set/mutator_set_kernel.rs
deleted file mode 100644
index ab1a7256..00000000
--- a/src/util_types/mutator_set/mutator_set_kernel.rs
+++ /dev/null
@@ -1,1108 +0,0 @@
-use crate::models::blockchain::shared::Hash;
-use crate::prelude::twenty_first;
-
-use get_size::GetSize;
-use itertools::Itertools;
-use num_traits::Zero;
-use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
-use std::{error::Error, fmt};
-use tasm_lib::twenty_first::util_types::algebraic_hasher::{AlgebraicHasher, Sponge};
-use twenty_first::shared_math::b_field_element::BFieldElement;
-use twenty_first::shared_math::bfield_codec::BFieldCodec;
-use twenty_first::shared_math::tip5::{Digest, DIGEST_LENGTH};
-use twenty_first::util_types::mmr;
-use twenty_first::util_types::mmr::mmr_membership_proof::MmrMembershipProof;
-use twenty_first::util_types::mmr::mmr_trait::Mmr;
-
-use super::active_window::ActiveWindow;
-use super::addition_record::AdditionRecord;
-use super::chunk::Chunk;
-use super::chunk_dictionary::ChunkDictionary;
-use super::ms_membership_proof::MsMembershipProof;
-use super::removal_record::AbsoluteIndexSet;
-use super::removal_record::RemovalRecord;
-use super::shared::{indices_to_hash_map, BATCH_SIZE, CHUNK_SIZE, NUM_TRIALS, WINDOW_SIZE};
-
-impl Error for MutatorSetKernelError {}
-
-impl fmt::Display for MutatorSetKernelError {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{:?}", self)
-    }
-}
-
-#[derive(PartialEq, Eq, Debug)]
-pub enum MutatorSetKernelError {
-    RequestedAoclAuthPathOutOfBounds((u64, u64)),
-    RequestedSwbfAuthPathOutOfBounds((u64, u64)),
-    MutatorSetIsEmpty,
-}
-
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, GetSize)]
-pub struct MutatorSetKernel<MMR: Mmr<Hash>> {
-    pub aocl: MMR,
-    pub swbf_inactive: MMR,
-    pub swbf_active: ActiveWindow,
-}
-
-/// Get the (absolute) indices for removing this item from the mutator set.
-pub fn get_swbf_indices(
-    item: Digest,
-    sender_randomness: Digest,
-    receiver_preimage: Digest,
-    aocl_leaf_index: u64,
-) -> [u128; NUM_TRIALS as usize] {
-    let batch_index: u128 = aocl_leaf_index as u128 / BATCH_SIZE as u128;
-    let batch_offset: u128 = batch_index * CHUNK_SIZE as u128;
-    let leaf_index_bfes = aocl_leaf_index.encode();
-    let input = [
-        item.encode(),
-        sender_randomness.encode(),
-        receiver_preimage.encode(),
-        leaf_index_bfes,
-    ]
-    .concat();
-
-    let mut sponge = Hash::init();
-    Hash::pad_and_absorb_all(&mut sponge, &input);
-    Hash::sample_indices(&mut sponge, WINDOW_SIZE, NUM_TRIALS as usize)
-        .into_iter()
-        .map(|sample_index| sample_index as u128 + batch_offset)
-        .collect_vec()
-        .try_into()
-        .unwrap()
-}
-
-impl<MMR: Mmr<Hash>> MutatorSetKernel<MMR> {
-    /// Generates a removal record with which to update the set commitment.
-    pub fn drop(&self, item: Digest, membership_proof: &MsMembershipProof) -> RemovalRecord {
-        let indices: AbsoluteIndexSet = AbsoluteIndexSet::new(&get_swbf_indices(
-            item,
-            membership_proof.sender_randomness,
-            membership_proof.receiver_preimage,
-            membership_proof.auth_path_aocl.leaf_index,
-        ));
-
-        RemovalRecord {
-            absolute_indices: indices,
-            target_chunks: membership_proof.target_chunks.clone(),
-        }
-    }
-
-    /// Determine if the window slides before absorbing an item,
-    /// given the index of the to-be-added item.
-    pub fn window_slides(added_index: u64) -> bool {
-        added_index != 0 && added_index % BATCH_SIZE as u64 == 0
-
-        // example cases:
-        // - index == 0 we don't care about
-        // - index == 1 does not generate a slide
-        // - index == n * BATCH_SIZE generates a slide for any n
-    }
-
-    pub fn window_slides_back(removed_index: u64) -> bool {
-        Self::window_slides(removed_index)
-    }
-
-    /// Return the batch index for the latest addition to the mutator set
-    pub fn get_batch_index(&self) -> u64 {
-        match self.aocl.count_leaves() {
-            0 => 0,
-            n => (n - 1) / BATCH_SIZE as u64,
-        }
-    }
-
-    /// Helper function. Like `add` but also returns the chunk that
-    /// was added to the inactive SWBF if the window slid (and None
-    /// otherwise) since this is needed by the archival version of
-    /// the mutator set.
-    pub fn add_helper(&mut self, addition_record: &AdditionRecord) -> Option<(u64, Chunk)> {
-        // Notice that `add` cannot return a membership proof since `add` cannot know the
-        // randomness that was used to create the commitment. This randomness can only be know
-        // by the sender and/or receiver of the UTXO. And `add` must be run be all nodes keeping
-        // track of the mutator set.
-
-        // add to list
-        let item_index = self.aocl.count_leaves();
-        self.aocl
-            .append(addition_record.canonical_commitment.to_owned()); // ignore auth path
-
-        if !Self::window_slides(item_index) {
-            return None;
-        }
-
-        // if window slides, update filter
-        // First update the inactive part of the SWBF, the SWBF MMR
-        let new_chunk: Chunk = self.swbf_active.slid_chunk();
-        let chunk_digest: Digest = Hash::hash(&new_chunk);
-        let new_chunk_index = self.swbf_inactive.count_leaves();
-        self.swbf_inactive.append(chunk_digest); // ignore auth path
-
-        // Then move window to the right, equivalent to moving values
-        // inside window to the left.
-        self.swbf_active.slide_window();
-
-        // Return the chunk that was added to the inactive part of the SWBF.
-        // This chunk is needed by the Archival mutator set. The Regular
-        // mutator set can ignore it.
-        Some((new_chunk_index, new_chunk))
-    }
-
-    /// Remove a record and return the chunks that have been updated in this process,
-    /// after applying the update. Does not mutate the removal record.
-    pub fn remove_helper(&mut self, removal_record: &RemovalRecord) -> HashMap<u64, Chunk> {
-        let batch_index = self.get_batch_index();
-        let active_window_start = batch_index as u128 * CHUNK_SIZE as u128;
-
-        // insert all indices
-        let mut new_target_chunks: ChunkDictionary = removal_record.target_chunks.clone();
-        let chunkindices_to_indices_dict: HashMap<u64, Vec<u128>> =
-            removal_record.get_chunkidx_to_indices_dict();
-
-        for (chunk_index, indices) in chunkindices_to_indices_dict {
-            if chunk_index >= batch_index {
-                // index is in the active part, so insert it in the active part of the Bloom filter
-                for index in indices {
-                    let relative_index = (index - active_window_start) as u32;
-                    self.swbf_active.insert(relative_index);
-                }
-
-                continue;
-            }
-
-            // If chunk index is not in the active part, insert the index into the relevant chunk
-            let new_target_chunks_clone = new_target_chunks.clone();
-            let relevant_chunk = new_target_chunks
-                .dictionary
-                .get_mut(&chunk_index)
-                .unwrap_or_else(|| {
-                    panic!(
-                        "Can't get chunk index {chunk_index} from removal record dictionary! dictionary: {:?}\nAOCL size: {}\nbatch index: {}\nRemoval record: {:?}",
-                        new_target_chunks_clone.dictionary,
-                        self.aocl.count_leaves(),
-                        batch_index,
-                        removal_record
-                    )
-                });
-            for index in indices {
-                let relative_index = (index % CHUNK_SIZE as u128) as u32;
-                relevant_chunk.1.insert(relative_index);
-            }
-        }
-
-        // update mmr
-        // to do this, we need to keep track of all membership proofs
-        let all_mmr_membership_proofs = new_target_chunks
-            .dictionary
-            .values()
-            .map(|(p, _c)| p.to_owned());
-        let all_leafs = new_target_chunks
-            .dictionary
-            .values()
-            .map(|(_p, chunk)| Hash::hash(chunk));
-        let mutation_data: Vec<(MmrMembershipProof<Hash>, Digest)> =
-            all_mmr_membership_proofs.zip(all_leafs).collect();
-
-        // If we want to update the membership proof with this removal, we
-        // could use the below function.
-    /// Remove a record and return the chunks that have been updated in this process,
-    /// after applying the update. Does not mutate the removal record.
-    pub fn remove_helper(&mut self, removal_record: &RemovalRecord) -> HashMap<u64, Chunk> {
-        let batch_index = self.get_batch_index();
-        let active_window_start = batch_index as u128 * CHUNK_SIZE as u128;
-
-        // insert all indices
-        let mut new_target_chunks: ChunkDictionary = removal_record.target_chunks.clone();
-        let chunkindices_to_indices_dict: HashMap<u64, Vec<u128>> =
-            removal_record.get_chunkidx_to_indices_dict();
-
-        for (chunk_index, indices) in chunkindices_to_indices_dict {
-            if chunk_index >= batch_index {
-                // index is in the active part, so insert it in the active part of the Bloom filter
-                for index in indices {
-                    let relative_index = (index - active_window_start) as u32;
-                    self.swbf_active.insert(relative_index);
-                }
-
-                continue;
-            }
-
-            // If the chunk index is not in the active part, insert the index into the relevant chunk
-            let new_target_chunks_clone = new_target_chunks.clone();
-            let relevant_chunk = new_target_chunks
-                .dictionary
-                .get_mut(&chunk_index)
-                .unwrap_or_else(|| {
-                    panic!(
-                        "Can't get chunk index {chunk_index} from removal record dictionary! dictionary: {:?}\nAOCL size: {}\nbatch index: {}\nRemoval record: {:?}",
-                        new_target_chunks_clone.dictionary,
-                        self.aocl.count_leaves(),
-                        batch_index,
-                        removal_record
-                    )
-                });
-            for index in indices {
-                let relative_index = (index % CHUNK_SIZE as u128) as u32;
-                relevant_chunk.1.insert(relative_index);
-            }
-        }
-
-        // update mmr
-        // to do this, we need to keep track of all membership proofs
-        let all_mmr_membership_proofs = new_target_chunks
-            .dictionary
-            .values()
-            .map(|(p, _c)| p.to_owned());
-        let all_leafs = new_target_chunks
-            .dictionary
-            .values()
-            .map(|(_p, chunk)| Hash::hash(chunk));
-        let mutation_data: Vec<(MmrMembershipProof<Hash>, Digest)> =
-            all_mmr_membership_proofs.zip(all_leafs).collect();
-
-        // If we wanted to update membership proofs with this removal, we
-        // could use the function below.
-        self.swbf_inactive
-            .batch_mutate_leaf_and_update_mps(&mut [], mutation_data);
-
-        new_target_chunks
-            .dictionary
-            .into_iter()
-            .map(|(chunk_index, (_mp, chunk))| (chunk_index, chunk))
-            .collect()
-    }
-
-    /// Generates a membership proof that will be valid when the item
-    /// is added to the mutator set.
-    pub fn prove(
-        &self,
-        item: Digest,
-        sender_randomness: Digest,
-        receiver_preimage: Digest,
-    ) -> MsMembershipProof {
-        // compute commitment
-        let item_commitment = Hash::hash_pair(item, sender_randomness);
-
-        // simulate adding to commitment list
-        let auth_path_aocl = self.aocl.to_accumulator().append(item_commitment);
-        let target_chunks: ChunkDictionary = ChunkDictionary::default();
-
-        // return membership proof
-        MsMembershipProof {
-            sender_randomness: sender_randomness.to_owned(),
-            receiver_preimage: receiver_preimage.to_owned(),
-            auth_path_aocl,
-            target_chunks,
-        }
-    }
-
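The leaf that `verify` (below) recomputes nests two commitments; only someone who knows both sender randomness and receiver preimage can reproduce it, which is why `add` cannot hand out membership proofs. A schematic with toy u64 stand-ins for digests (`hash_pair` is a placeholder for Tip5's two-to-one hash, not the crate's API):

    // Toy mixer standing in for Hash::hash_pair.
    fn hash_pair(a: u64, b: u64) -> u64 {
        a.rotate_left(17) ^ b.wrapping_mul(0x9E37_79B9_7F4A_7C15)
    }

    // Shape of the AOCL leaf checked by `verify`:
    //   H( H(item, sender_randomness), H(receiver_preimage, zero-digest) )
    fn aocl_leaf(item: u64, sender_randomness: u64, receiver_preimage: u64) -> u64 {
        let item_commitment = hash_pair(item, sender_randomness);
        let receiver_digest = hash_pair(receiver_preimage, 0); // zero digest
        hash_pair(item_commitment, receiver_digest)
    }
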
-    pub fn verify(&self, item: Digest, membership_proof: &MsMembershipProof) -> bool {
-        // If the data index does not exist in the AOCL, return false.
-        // This also ensures that no "future" indices will be
-        // returned from `get_indices`, so we don't have to check for
-        // future indices in a separate check.
-        if self.aocl.count_leaves() <= membership_proof.auth_path_aocl.leaf_index {
-            return false;
-        }
-
-        // verify that a commitment to the item lives in the aocl mmr
-        let leaf = Hash::hash_pair(
-            Hash::hash_pair(item, membership_proof.sender_randomness),
-            Hash::hash_pair(
-                membership_proof.receiver_preimage,
-                Digest::new([BFieldElement::zero(); DIGEST_LENGTH]),
-            ),
-        );
-        let (is_aocl_member, _) = membership_proof.auth_path_aocl.verify(
-            &self.aocl.get_peaks(),
-            leaf,
-            self.aocl.count_leaves(),
-        );
-        if !is_aocl_member {
-            return false;
-        }
-
-        // verify that some indices are not present in the swbf
-        let mut has_absent_index = false;
-        let mut entries_in_dictionary = true;
-        let mut all_auth_paths_are_valid = true;
-
-        // prepare parameters of inactive part
-        let current_batch_index: u64 = self.get_batch_index();
-        let window_start = current_batch_index as u128 * CHUNK_SIZE as u128;
-
-        // Get all bloom filter indices
-        let all_indices = AbsoluteIndexSet::new(&get_swbf_indices(
-            item,
-            membership_proof.sender_randomness,
-            membership_proof.receiver_preimage,
-            membership_proof.auth_path_aocl.leaf_index,
-        ));
-
-        let chunkidx_to_indices_dict = indices_to_hash_map(&all_indices.to_array());
-        'outer: for (chunk_index, indices) in chunkidx_to_indices_dict.into_iter() {
-            if chunk_index < current_batch_index {
-                // verify mmr auth path
-                if !membership_proof
-                    .target_chunks
-                    .dictionary
-                    .contains_key(&chunk_index)
-                {
-                    entries_in_dictionary = false;
-                    break 'outer;
-                }
-
-                let mp_and_chunk: &(mmr::mmr_membership_proof::MmrMembershipProof<Hash>, Chunk) =
-                    membership_proof
-                        .target_chunks
-                        .dictionary
-                        .get(&chunk_index)
-                        .unwrap();
-                let (valid_auth_path, _) = mp_and_chunk.0.verify(
-                    &self.swbf_inactive.get_peaks(),
-                    Hash::hash(&mp_and_chunk.1),
-                    self.swbf_inactive.count_leaves(),
-                );
-
-                all_auth_paths_are_valid = all_auth_paths_are_valid && valid_auth_path;
-
-                'inner_inactive: for index in indices {
-                    let index_within_chunk = index % CHUNK_SIZE as u128;
-                    if !mp_and_chunk.1.contains(index_within_chunk as u32) {
-                        has_absent_index = true;
-                        break 'inner_inactive;
-                    }
-                }
-            } else {
-                // indices are in active window
-                'inner_active: for index in indices {
-                    let relative_index = index - window_start;
-                    if !self.swbf_active.contains(relative_index as u32) {
-                        has_absent_index = true;
-                        break 'inner_active;
-                    }
-                }
-            }
-        }
-
-        // return verdict
-        is_aocl_member && entries_in_dictionary && all_auth_paths_are_valid && has_absent_index
-    }
-
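The inactive/active split in `verify` hinges on one mapping: an absolute Bloom filter index decomposes into a chunk index and an index within that chunk. In isolation (CHUNK_SIZE is an assumed value):

    const CHUNK_SIZE: u128 = 1 << 12; // assumed value

    fn chunk_coordinates(absolute_index: u128) -> (u64, u32) {
        let chunk_index = (absolute_index / CHUNK_SIZE) as u64;
        let relative_index = (absolute_index % CHUNK_SIZE) as u32;
        (chunk_index, relative_index)
    }
    // Chunks below the current batch index are looked up in the (inactive)
    // chunk dictionary; indices at or above the window start hit the active
    // window directly.
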
-    /// Apply a batch of removal records. Return a hashmap of
-    /// { chunk index => updated_chunk }.
-    pub fn batch_remove(
-        &mut self,
-        mut removal_records: Vec<RemovalRecord>,
-        preserved_membership_proofs: &mut [&mut MsMembershipProof],
-    ) -> HashMap<u64, Chunk> {
-        let batch_index = self.get_batch_index();
-        let active_window_start = batch_index as u128 * CHUNK_SIZE as u128;
-
-        // Collect all indices that are set by the removal records
-        let all_removal_records_indices: Vec<u128> = removal_records
-            .iter()
-            .map(|x| x.absolute_indices.to_vec())
-            .concat();
-
-        // Loop over all indices from removal records in order to create a mapping
-        // { chunk index => chunk mutation } where "chunk mutation" has the type of
-        // `Chunk` but only represents the values which are set by the removal records
-        // being handled.
-        let mut chunkidx_to_chunk_difference_dict: HashMap<u64, Chunk> = HashMap::new();
-        all_removal_records_indices.iter().for_each(|index| {
-            if *index >= active_window_start {
-                let relative_index = (index - active_window_start) as u32;
-                self.swbf_active.insert(relative_index);
-            } else {
-                chunkidx_to_chunk_difference_dict
-                    .entry((index / CHUNK_SIZE as u128) as u64)
-                    .or_insert_with(Chunk::empty_chunk)
-                    .insert((*index % CHUNK_SIZE as u128) as u32);
-            }
-        });
-
-        // Collect all affected chunks as they look before these removal records are applied.
-        // These chunks are part of the removal records, so we fetch them there.
-        let mut mutation_data_preimage: HashMap<u64, (&mut Chunk, MmrMembershipProof<Hash>)> =
-            HashMap::new();
-        for removal_record in removal_records.iter_mut() {
-            for (chunk_index, (mmr_mp, chunk)) in removal_record.target_chunks.dictionary.iter_mut()
-            {
-                let chunk_hash = Hash::hash(chunk);
-                let prev_val =
-                    mutation_data_preimage.insert(*chunk_index, (chunk, mmr_mp.to_owned()));
-
-                // Sanity check that all removal records agree on both chunks and MMR membership
-                // proofs.
-                if let Some((chnk, mm)) = prev_val {
-                    assert!(mm == *mmr_mp && chunk_hash == Hash::hash(chnk))
-                }
-            }
-        }
-
-        // Apply the removal records: the new chunk is obtained by adding the chunk difference
-        for (chunk_index, (chunk, _)) in mutation_data_preimage.iter_mut() {
-            **chunk = chunk
-                .clone()
-                .combine(chunkidx_to_chunk_difference_dict[chunk_index].clone())
-                .clone();
-        }
-
-        // Set the chunk values in the membership proofs that we want to preserve to the
-        // newly calculated chunk values.
-        // This is done by looping over all membership proofs and checking if they contain
-        // any of the chunks that are affected by the removal records.
-        for mp in preserved_membership_proofs.iter_mut() {
-            for (chunk_index, (_, chunk)) in mp.target_chunks.dictionary.iter_mut() {
-                if mutation_data_preimage.contains_key(chunk_index) {
-                    *chunk = mutation_data_preimage[chunk_index].0.to_owned();
-                }
-            }
-        }
-
-        // Calculate the digests of the affected leafs in the inactive part of the sliding-window
-        // Bloom filter such that we can apply a batch-update operation to the MMR through which
-        // this part of the Bloom filter is represented.
-        let swbf_inactive_mutation_data: Vec<(MmrMembershipProof<Hash>, Digest)> =
-            mutation_data_preimage
-                .into_values()
-                .map(|x| (x.1, Hash::hash(x.0)))
-                .collect();
-
-        // Create a vector of pointers to the MMR-membership part of the mutator set membership
-        // proofs that we want to preserve. This is used as input to a batch-call to the
-        // underlying MMR.
-        let mut preseved_mmr_membership_proofs: Vec<&mut MmrMembershipProof<Hash>> =
-            preserved_membership_proofs
-                .iter_mut()
-                .flat_map(|x| {
-                    x.target_chunks
-                        .dictionary
-                        .iter_mut()
-                        .map(|y| &mut y.1 .0)
-                        .collect::<Vec<_>>()
-                })
-                .collect();
-
-        // Apply the batch-update to the inactive part of the sliding window Bloom filter.
-        // This updates both the inactive part of the SWBF and the MMR membership proofs.
-        self.swbf_inactive.batch_mutate_leaf_and_update_mps(
-            &mut preseved_mmr_membership_proofs,
-            swbf_inactive_mutation_data,
-        );
-
-        chunkidx_to_chunk_difference_dict
-    }
-
-    /// Check if a removal record can be applied to a mutator set. Returns false if either
-    /// the MMR membership proofs are unsynced, or if all its indices are already set.
-    pub fn can_remove(&self, removal_record: &RemovalRecord) -> bool {
-        let mut have_absent_index = false;
-        if !removal_record.validate(self) {
-            return false;
-        }
-
-        for inserted_index in removal_record.absolute_indices.to_vec().into_iter() {
-            // determine if inserted index lives in active window
-            let active_window_start =
-                (self.aocl.count_leaves() / BATCH_SIZE as u64) as u128 * CHUNK_SIZE as u128;
-            if inserted_index < active_window_start {
-                let inserted_index_chunkidx = (inserted_index / CHUNK_SIZE as u128) as u64;
-                if let Some((_mmr_mp, chunk)) = removal_record
-                    .target_chunks
-                    .dictionary
-                    .get(&inserted_index_chunkidx)
-                {
-                    let relative_index = (inserted_index % CHUNK_SIZE as u128) as u32;
-                    if !chunk.contains(relative_index) {
-                        have_absent_index = true;
-                        break;
-                    }
-                }
-            } else {
-                let relative_index = (inserted_index - active_window_start) as u32;
-                if !self.swbf_active.contains(relative_index) {
-                    have_absent_index = true;
-                    break;
-                }
-            }
-        }
-
-        have_absent_index
-    }
-}
-
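The chunk-difference bookkeeping at the start of `batch_remove` can be isolated: indices below the active window are grouped per chunk index, while indices inside the window are set directly. A sketch under assumed constants:

    use std::collections::HashMap;

    const CHUNK_SIZE: u128 = 1 << 12; // assumed value

    // Group sub-window indices by chunk, like chunkidx_to_chunk_difference_dict.
    fn chunk_differences(indices: &[u128], active_window_start: u128) -> HashMap<u64, Vec<u32>> {
        let mut diff: HashMap<u64, Vec<u32>> = HashMap::new();
        for &index in indices {
            if index < active_window_start {
                diff.entry((index / CHUNK_SIZE) as u64)
                    .or_default()
                    .push((index % CHUNK_SIZE) as u32);
            }
            // Indices at or above the window start are inserted straight into
            // the active window and need no chunk record.
        }
        diff
    }
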
-impl<MMR: Mmr<Hash> + BFieldCodec> BFieldCodec for MutatorSetKernel<MMR> {
-    type Error = anyhow::Error;
-    fn decode(sequence: &[BFieldElement]) -> anyhow::Result<Box<Self>> {
-        let mut index = 0;
-        let aocl_len: usize = match sequence.first() {
-            Some(aocl_len) => aocl_len.value().try_into()?,
-            None => anyhow::bail!("Invalid sequence length for decoding MutatorSetKernel."),
-        };
-        index += 1;
-        let aocl = match MMR::decode(&sequence[index..(index + aocl_len)]) {
-            Ok(decoded) => *decoded,
-            Err(err) => anyhow::bail!("Failed to decode AOCL-MMR. Error was: {err}"),
-        };
-        index += aocl_len;
-
-        let swbf_inactive_len: usize = match sequence.get(index) {
-            Some(swbf_inactive_len) => swbf_inactive_len.value().try_into()?,
-            None => anyhow::bail!("Invalid sequence length for decoding MutatorSetKernel."),
-        };
-        index += 1;
-        let swbf_inactive = match MMR::decode(&sequence[index..(index + swbf_inactive_len)]) {
-            Ok(decoded) => *decoded,
-            Err(err) => anyhow::bail!("Failed to decode SWBF-MMR. Error was: {err}"),
-        };
-        index += swbf_inactive_len;
-
-        let swbf_active_len: usize = match sequence.get(index) {
-            Some(swbf_active_len) => swbf_active_len.value().try_into()?,
-            None => anyhow::bail!("Invalid sequence length for decoding MutatorSetKernel."),
-        };
-        index += 1;
-        let swbf_active = *ActiveWindow::decode(&sequence[index..(index + swbf_active_len)])?;
-        index += swbf_active_len;
-
-        if sequence.len() != index {
-            anyhow::bail!("Invalid sequence length for decoding MutatorSetKernel.");
-        }
-
-        Ok(Box::new(Self {
-            aocl,
-            swbf_inactive,
-            swbf_active,
-        }))
-    }
-
-    fn encode(&self) -> Vec<BFieldElement> {
-        let aocl_encoded = self.aocl.encode();
-        let aocl_len = BFieldElement::new(aocl_encoded.len() as u64);
-
-        let swbf_inactive_encoded = self.swbf_inactive.encode();
-        let swbf_inactive_len = BFieldElement::new(swbf_inactive_encoded.len() as u64);
-
-        let swbf_active_encoded = self.swbf_active.encode();
-        let swbf_active_len = BFieldElement::new(swbf_active_encoded.len() as u64);
-        [
-            vec![aocl_len],
-            aocl_encoded,
-            vec![swbf_inactive_len],
-            swbf_inactive_encoded,
-            vec![swbf_active_len],
-            swbf_active_encoded,
-        ]
-        .concat()
-    }
-
-    fn static_length() -> Option<usize> {
-        None
-    }
-}
-
-#[cfg(test)]
-mod accumulation_scheme_tests {
-    use rand::prelude::*;
-    use rand::Rng;
-
-    use twenty_first::util_types::mmr::mmr_accumulator::MmrAccumulator;
-
-    use crate::util_types::mutator_set::mutator_set_accumulator::MutatorSetAccumulator;
-    use crate::util_types::mutator_set::mutator_set_trait::commit;
-    use crate::util_types::mutator_set::mutator_set_trait::MutatorSet;
-    use crate::util_types::test_shared::mutator_set::*;
-
-    use super::*;
-
-    #[test]
-    fn get_batch_index_test() {
-        // Verify that the method to get the batch index returns sane results
-
-        let mut mutator_set = MutatorSetAccumulator::default();
-        assert_eq!(
-            0,
-            mutator_set.kernel.get_batch_index(),
-            "Batch index for empty MS must be zero"
-        );
-
-        for i in 0..BATCH_SIZE {
-            let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
-            let addition_record = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
-            mutator_set.add(&addition_record);
-            assert_eq!(
-                0,
-                mutator_set.kernel.get_batch_index(),
-                "Batch index must be 0 after adding {} elements",
-                i
-            );
-        }
-
-        let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
-        let addition_record = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
-        mutator_set.add(&addition_record);
-        assert_eq!(
-            1,
-            mutator_set.kernel.get_batch_index(),
-            "Batch index must be one after adding BATCH_SIZE+1 elements"
-        );
-    }
-
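The `encode`/`decode` pair above uses a simple length-prefixed layout: [ len(aocl) | aocl... | len(swbf_inactive) | swbf_inactive... | len(swbf_active) | swbf_active... ]. A toy round-trip of the same idea over u64 "field elements" (illustration only, not BFieldCodec):

    // Each segment is written as its length followed by its elements,
    // mirroring the concatenation in `encode`.
    fn encode_segments(segments: &[Vec<u64>]) -> Vec<u64> {
        let mut out = Vec::new();
        for seg in segments {
            out.push(seg.len() as u64); // length prefix
            out.extend_from_slice(seg);
        }
        out
    }
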
-    #[test]
-    fn mutator_set_hash_test() {
-        let empty_set = MutatorSetAccumulator::default();
-        let empty_hash = empty_set.hash();
-
-        // Add one element to the append-only commitment list
-        let mut set_with_aocl_append = MutatorSetAccumulator::default();
-
-        let (item0, _sender_randomness, _receiver_preimage) = make_item_and_randomnesses();
-
-        set_with_aocl_append.kernel.aocl.append(item0);
-        let hash_of_aocl_append = set_with_aocl_append.hash();
-
-        assert_ne!(
-            empty_hash, hash_of_aocl_append,
-            "Appending to AOCL must change MutatorSet commitment"
-        );
-
-        // Manipulate inactive SWBF
-        let mut set_with_swbf_inactive_append = MutatorSetAccumulator::default();
-        set_with_swbf_inactive_append
-            .kernel
-            .swbf_inactive
-            .append(item0);
-        let hash_of_one_in_inactive = set_with_swbf_inactive_append.hash();
-        assert_ne!(
-            empty_hash, hash_of_one_in_inactive,
-            "Changing inactive must change MS hash"
-        );
-        assert_ne!(
-            hash_of_aocl_append, hash_of_one_in_inactive,
-            "One in AOCL and one in inactive must hash to different digests"
-        );
-
-        // Manipulate active window
-        let mut active_window_changed = empty_set;
-        active_window_changed.kernel.swbf_active.insert(42);
-        assert_ne!(
-            empty_hash,
-            active_window_changed.hash(),
-            "Changing active window must change commitment"
-        );
-
-        // Sanity check: removing the index again must restore the empty commitment
-        active_window_changed.kernel.swbf_active.remove(42);
-        assert_eq!(
-            empty_hash,
-            active_window_changed.hash(),
-            "Commitment to empty MS must be consistent"
-        );
-    }
-
-    #[test]
-    fn ms_get_indices_test() {
-        // Test that `get_indices` behaves as expected, i.e.
-        // that it always returns something of length `NUM_TRIALS`, and that the
-        // returned values are in the expected range.
-
-        let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
-        let ret: [u128; NUM_TRIALS as usize] =
-            get_swbf_indices(item, sender_randomness, receiver_preimage, 0);
-        assert_eq!(NUM_TRIALS as usize, ret.len());
-        assert!(ret.iter().all(|&x| x < WINDOW_SIZE as u128));
-    }
-
-    #[test]
-    fn ms_get_indices_test_big() {
-        // Test that `get_indices` behaves as expected, i.e. that it returns indices
-        // in the correct range, and always returns something of length `NUM_TRIALS`.
-
-        for _ in 0..1000 {
-            let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
-            let ret: [u128; NUM_TRIALS as usize] =
-                get_swbf_indices(item, sender_randomness, receiver_preimage, 0);
-            assert_eq!(NUM_TRIALS as usize, ret.len());
-            assert!(ret.iter().all(|&x| x < WINDOW_SIZE as u128));
-        }
-
-        for _ in 0..1000 {
-            let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
-            let ret: [u128; NUM_TRIALS as usize] = get_swbf_indices(
-                item,
-                sender_randomness,
-                receiver_preimage,
-                (17 * BATCH_SIZE) as u64,
-            );
-            assert_eq!(NUM_TRIALS as usize, ret.len());
-            assert!(ret
-                .iter()
-                .all(|&x| (x as u32) < WINDOW_SIZE + 17 * CHUNK_SIZE
-                    && (x as u32) >= 17 * CHUNK_SIZE));
-        }
-    }
-
-    #[test]
-    fn init_test() {
-        let accumulator = MutatorSetAccumulator::default();
-        let mut rms = empty_rusty_mutator_set();
-        let archival = rms.ams_mut();
-
-        // Verify that the function to get the batch index does not overflow for the empty MS
-        assert_eq!(
-            0,
-            accumulator.kernel.get_batch_index(),
-            "Batch index must be zero for empty MS accumulator"
-        );
-        assert_eq!(
-            0,
-            archival.kernel.get_batch_index(),
-            "Batch index must be zero for empty archival MS"
-        );
-    }
-
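The range assertion in `ms_get_indices_test_big` above generalizes: after the window has slid k times (one slide per batch), sampled indices land in [k * CHUNK_SIZE, k * CHUNK_SIZE + WINDOW_SIZE). A standalone check, with assumed values for the constants:

    const CHUNK_SIZE: u32 = 1 << 12; // assumed value
    const WINDOW_SIZE: u32 = 1 << 20; // assumed value

    fn index_in_range(index: u128, slides: u32) -> bool {
        let start = slides as u128 * CHUNK_SIZE as u128;
        index >= start && index < start + WINDOW_SIZE as u128
    }
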
-    #[test]
-    fn verify_future_indices_test() {
-        // Ensure that `verify` does not crash when given a membership proof
-        // that represents a future addition to the AOCL.
-
-        let mut mutator_set = MutatorSetAccumulator::default().kernel;
-        let empty_mutator_set = MutatorSetAccumulator::default().kernel;
-
-        for _ in 0..2 * BATCH_SIZE + 2 {
-            let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
-
-            let addition_record: AdditionRecord =
-                commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
-            let membership_proof: MsMembershipProof =
-                mutator_set.prove(item, sender_randomness, receiver_preimage);
-            mutator_set.add_helper(&addition_record);
-            assert!(mutator_set.verify(item, &membership_proof));
-
-            // Verify that a future membership proof returns false and does not crash
-            assert!(!empty_mutator_set.verify(item, &membership_proof));
-        }
-    }
-
-    #[test]
-    fn test_membership_proof_update_from_add() {
-        let mut mutator_set = MutatorSetAccumulator::default();
-        let (own_item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
-
-        let addition_record = commit(
-            own_item,
-            sender_randomness,
-            receiver_preimage.hash::<Hash>(),
-        );
-        let mut membership_proof =
-            mutator_set.prove(own_item, sender_randomness, receiver_preimage);
-        mutator_set.kernel.add_helper(&addition_record);
-
-        // Update the membership proof with the add operation. Verify that it has changed,
-        // and that it now fails to verify.
-        let (new_item, new_sender_randomness, new_receiver_preimage) = make_item_and_randomnesses();
-        let new_addition_record = commit(
-            new_item,
-            new_sender_randomness,
-            new_receiver_preimage.hash::<Hash>(),
-        );
-        let original_membership_proof = membership_proof.clone();
-        let changed_mp = match membership_proof.update_from_addition(
-            own_item,
-            &mutator_set,
-            &new_addition_record,
-        ) {
-            Ok(changed) => changed,
-            Err(err) => panic!("{}", err),
-        };
-        assert!(
-            changed_mp,
-            "Update must indicate that membership proof has changed"
-        );
-        assert_ne!(
-            original_membership_proof.auth_path_aocl,
-            membership_proof.auth_path_aocl
-        );
-        assert!(
-            mutator_set.verify(own_item, &original_membership_proof),
-            "Original membership proof must verify prior to addition"
-        );
-        assert!(
-            !mutator_set.verify(own_item, &membership_proof),
-            "New membership proof must fail to verify prior to addition"
-        );
-
-        // Insert the new element into the mutator set, then verify that the membership proof works
-        // and that the original membership proof is invalid.
-        mutator_set.kernel.add_helper(&new_addition_record);
-        assert!(
-            !mutator_set.verify(own_item, &original_membership_proof),
-            "Original membership proof must fail to verify after addition"
-        );
-        assert!(
-            mutator_set.verify(own_item, &membership_proof),
-            "New membership proof must verify after addition"
-        );
-    }
-
-    #[test]
-    fn membership_proof_updating_from_add_pbt() {
-        let mut rng = thread_rng();
-
-        let mut mutator_set = MutatorSetAccumulator::default();
-
-        let num_additions = rng.gen_range(0..=100i32);
-        println!(
-            "running multiple additions test for {} additions",
-            num_additions
-        );
-
-        let mut membership_proofs_and_items: Vec<(MsMembershipProof, Digest)> = vec![];
-        for i in 0..num_additions {
-            println!("loop iteration {}", i);
-
-            let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
-
-            let addition_record = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
-            let membership_proof = mutator_set.prove(item, sender_randomness, receiver_preimage);
-
-            // Update all membership proofs
-            for (mp, itm) in membership_proofs_and_items.iter_mut() {
-                let original_mp = mp.clone();
-                let changed_res = mp.update_from_addition(*itm, &mutator_set, &addition_record);
-                assert!(changed_res.is_ok());
-
-                // verify that the boolean value returned by the updater method is set correctly
-                assert_eq!(changed_res.unwrap(), original_mp != *mp);
-            }
-
-            // Add the element
-            assert!(!mutator_set.verify(item, &membership_proof));
-            mutator_set.kernel.add_helper(&addition_record);
-            assert!(mutator_set.verify(item, &membership_proof));
-            membership_proofs_and_items.push((membership_proof, item));
-
-            // Verify that all membership proofs work
-            assert!(membership_proofs_and_items
-                .clone()
-                .into_iter()
-                .all(|(mp, itm)| mutator_set.verify(itm, &mp)));
-        }
-    }
-
-    #[test]
-    fn test_add_and_prove() {
-        let mut mutator_set = MutatorSetAccumulator::default();
-        let (item0, sender_randomness0, receiver_preimage0) = make_item_and_randomnesses();
-
-        let addition_record = commit(item0, sender_randomness0, receiver_preimage0.hash::<Hash>());
-        let membership_proof = mutator_set.prove(item0, sender_randomness0, receiver_preimage0);
-
-        assert!(!mutator_set.verify(item0, &membership_proof));
-
-        mutator_set.kernel.add_helper(&addition_record);
-
-        assert!(mutator_set.verify(item0, &membership_proof));
-
-        // Insert a new item and verify that this still works
-        let (item1, sender_randomness1, receiver_preimage1) = make_item_and_randomnesses();
-        let new_ar = commit(item1, sender_randomness1, receiver_preimage1.hash::<Hash>());
-        let new_mp = mutator_set.prove(item1, sender_randomness1, receiver_preimage1);
-        assert!(!mutator_set.verify(item1, &new_mp));
-
-        mutator_set.kernel.add_helper(&new_ar);
-        assert!(mutator_set.verify(item1, &new_mp));
-
-        // Insert ~2*BATCH_SIZE more elements and
-        // verify that it works throughout. The reason we insert this many
-        // is that we want to make sure that the window slides into a new
-        // position.
-        for _ in 0..2 * BATCH_SIZE + 4 {
-            let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
-            let other_ar = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
-            let other_mp = mutator_set.prove(item, sender_randomness, receiver_preimage);
-            assert!(!mutator_set.verify(item, &other_mp));
-
-            mutator_set.kernel.add_helper(&other_ar);
-            assert!(mutator_set.verify(item, &other_mp));
-        }
-    }
-
-    #[test]
-    fn batch_update_from_addition_and_removal_test() {
-        let mut mutator_set = MutatorSetAccumulator::default();
-
-        // It's important to test the number of additions around the shifting of the window,
-        // i.e. around the batch size.
-        let num_additions_list = vec![
-            1,
-            2,
-            BATCH_SIZE - 1,
-            BATCH_SIZE,
-            BATCH_SIZE + 1,
-            6 * BATCH_SIZE - 1,
-            6 * BATCH_SIZE,
-            6 * BATCH_SIZE + 1,
-        ];
-
-        let mut membership_proofs: Vec<MsMembershipProof> = vec![];
-        let mut items = vec![];
-
-        for num_additions in num_additions_list {
-            for _ in 0..num_additions {
-                let (new_item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
-
-                let addition_record = commit(
-                    new_item,
-                    sender_randomness,
-                    receiver_preimage.hash::<Hash>(),
-                );
-                let membership_proof =
-                    mutator_set.prove(new_item, sender_randomness, receiver_preimage);
-
-                // Update *all* membership proofs with the newly added item
-                let batch_update_res = MsMembershipProof::batch_update_from_addition(
-                    &mut membership_proofs.iter_mut().collect::<Vec<_>>(),
-                    &items,
-                    &mutator_set.kernel,
-                    &addition_record,
-                );
-                assert!(batch_update_res.is_ok());
-
-                mutator_set.kernel.add_helper(&addition_record);
-                assert!(mutator_set.verify(new_item, &membership_proof));
-
-                for (mp, &item) in membership_proofs.iter().zip(items.iter()) {
-                    assert!(mutator_set.verify(item, mp));
-                }
-
-                membership_proofs.push(membership_proof);
-                items.push(new_item);
-            }
-
-            // Remove items from the MS, and verify correct updating of membership proofs
-            for _ in 0..num_additions {
-                let item = items.pop().unwrap();
-                let mp = membership_proofs.pop().unwrap();
-                assert!(mutator_set.verify(item, &mp));
-
-                // generate removal record
-                let removal_record: RemovalRecord = mutator_set.drop(item, &mp);
-                assert!(removal_record.validate(&mutator_set.kernel));
-                assert!(mutator_set.kernel.can_remove(&removal_record));
-
-                // update membership proofs
-                let res = MsMembershipProof::batch_update_from_remove(
-                    &mut membership_proofs.iter_mut().collect::<Vec<_>>(),
-                    &removal_record,
-                );
-                assert!(res.is_ok());
-
-                // remove item from set
-                mutator_set.kernel.remove_helper(&removal_record);
-                assert!(!mutator_set.verify(item, &mp));
-
-                for (&itm, membp) in items.iter().zip(membership_proofs.iter()) {
-                    assert!(mutator_set.verify(itm, membp));
-                }
-            }
-        }
-    }
-
-    #[test]
-    fn test_multiple_adds() {
-        let mut mutator_set = MutatorSetAccumulator::default();
-
-        let num_additions = 65;
-
-        let mut items_and_membership_proofs: Vec<(Digest, MsMembershipProof)> = vec![];
-
-        for _ in 0..num_additions {
-            let (new_item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
-
-            let addition_record = commit(
-                new_item,
-                sender_randomness,
-                receiver_preimage.hash::<Hash>(),
-            );
-            let membership_proof =
-                mutator_set.prove(new_item, sender_randomness, receiver_preimage);
-
-            // Update *all* membership proofs with the newly added item
-            for (updatee_item, mp) in items_and_membership_proofs.iter_mut() {
-                let original_mp = mp.clone();
-                assert!(mutator_set.verify(*updatee_item, mp));
-                let changed_res =
-                    mp.update_from_addition(*updatee_item, &mutator_set, &addition_record);
-                assert!(changed_res.is_ok());
-
-                // verify that the boolean value returned by the updater method is set correctly
-                assert_eq!(changed_res.unwrap(), original_mp != *mp);
-            }
-
-            mutator_set.kernel.add_helper(&addition_record);
-            assert!(mutator_set.verify(new_item, &membership_proof));
-
-            (0..items_and_membership_proofs.len()).for_each(|j| {
-                let (old_item, mp) = &items_and_membership_proofs[j];
-                assert!(mutator_set.verify(*old_item, mp))
-            });
-
-            items_and_membership_proofs.push((new_item, membership_proof));
-        }
-
-        // Verify all membership proofs
-        (0..items_and_membership_proofs.len()).for_each(|k| {
-            assert!(mutator_set.verify(
-                items_and_membership_proofs[k].0,
-                &items_and_membership_proofs[k].1,
-            ));
-        });
-
-        // Remove items from the MS, and verify correct updating of membership proofs
-        (0..num_additions).for_each(|i| {
-            (i..items_and_membership_proofs.len()).for_each(|k| {
-                assert!(mutator_set.verify(
-                    items_and_membership_proofs[k].0,
-                    &items_and_membership_proofs[k].1,
-                ));
-            });
-            let (item, mp) = items_and_membership_proofs[i].clone();
-
-            assert!(mutator_set.verify(item, &mp));
-
-            // generate removal record
-            let removal_record: RemovalRecord = mutator_set.drop(item, &mp);
-            assert!(removal_record.validate(&mutator_set.kernel));
-            assert!(mutator_set.kernel.can_remove(&removal_record));
-            (i..items_and_membership_proofs.len()).for_each(|k| {
-                assert!(mutator_set.verify(
-                    items_and_membership_proofs[k].0,
-                    &items_and_membership_proofs[k].1,
-                ));
-            });
-
-            // update membership proofs
-            ((i + 1)..num_additions).for_each(|j| {
-                assert!(mutator_set.verify(
-                    items_and_membership_proofs[j].0,
-                    &items_and_membership_proofs[j].1
-                ));
-                let update_res = items_and_membership_proofs[j]
-                    .1
-                    .update_from_remove(&removal_record.clone());
-                assert!(update_res.is_ok());
-            });
-
-            // remove item from set
-            mutator_set.kernel.remove_helper(&removal_record);
-            assert!(!mutator_set.verify(item, &mp));
-
-            ((i + 1)..items_and_membership_proofs.len()).for_each(|k| {
-                assert!(mutator_set.verify(
-                    items_and_membership_proofs[k].0,
-                    &items_and_membership_proofs[k].1,
-                ));
-            });
-        });
-    }
-
-    #[test]
-    fn ms_serialization_test() {
-        // This test verifies that the mutator set structure can be serialized and deserialized.
-        // When Rust spawns threads (as it does when it runs tests, and in the Neptune Core client),
-        // the new threads only get 2 MB of stack memory initially. This can result in stack
-        // overflows at runtime. This test verifies that this does not happen.
-        // Cf.
-        // https://stackoverflow.com/questions/72618777/how-to-deserialize-a-nested-big-array
-        // and https://stackoverflow.com/questions/72621410/how-do-i-use-serde-stacker-in-my-deserialize-implementation
-        type Mmr = MmrAccumulator<Hash>;
-        type Ms = MutatorSetKernel<Mmr>;
-        let mut mutator_set: Ms = MutatorSetAccumulator::default().kernel;
-
-        let json_empty = serde_json::to_string(&mutator_set).unwrap();
-        println!("json = \n{}", json_empty);
-        let s_back = serde_json::from_str::<Ms>(&json_empty).unwrap();
-        assert!(s_back.aocl.is_empty());
-        assert!(s_back.swbf_inactive.is_empty());
-        assert!(s_back.swbf_active.sbf.is_empty());
-
-        // Add an item, verify correct serialization
-        let (mp, item) = insert_mock_item(&mut mutator_set);
-        let json_one_add = serde_json::to_string(&mutator_set).unwrap();
-        println!("json_one_add = \n{}", json_one_add);
-        let s_back_one_add = serde_json::from_str::<Ms>(&json_one_add).unwrap();
-        assert_eq!(1, s_back_one_add.aocl.count_leaves());
-        assert!(s_back_one_add.swbf_inactive.is_empty());
-        assert!(s_back_one_add.swbf_active.sbf.is_empty());
-        assert!(s_back_one_add.verify(item, &mp));
-
-        // Remove an item, verify correct serialization
-        remove_mock_item(&mut mutator_set, item, &mp);
-        let json_one_add_one_remove = serde_json::to_string(&mutator_set).unwrap();
-        println!("json_one_add_one_remove = \n{}", json_one_add_one_remove);
-        let s_back_one_add_one_remove =
-            serde_json::from_str::<Ms>(&json_one_add_one_remove).unwrap();
-        assert_eq!(
-            1,
-            s_back_one_add_one_remove.aocl.count_leaves(),
-            "AOCL must still have exactly one leaf"
-        );
-        assert!(
-            s_back_one_add_one_remove.swbf_inactive.is_empty(),
-            "Window should not have moved"
-        );
-        assert!(
-            !s_back_one_add_one_remove.swbf_active.sbf.is_empty(),
-            "Some of the indices in the active window must now be set"
-        );
-        assert!(
-            !s_back_one_add_one_remove.verify(item, &mp),
-            "Membership proof must fail after removal"
-        );
-    }
-}
diff --git a/src/util_types/mutator_set/mutator_set_trait.rs b/src/util_types/mutator_set/mutator_set_trait.rs
deleted file mode 100644
index 23674da1..00000000
--- a/src/util_types/mutator_set/mutator_set_trait.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-use crate::models::blockchain::shared::Hash;
-use crate::prelude::twenty_first;
-
-use twenty_first::shared_math::tip5::Digest;
-use twenty_first::util_types::algebraic_hasher::AlgebraicHasher;
-
-use super::addition_record::AdditionRecord;
-use super::ms_membership_proof::MsMembershipProof;
-use super::removal_record::RemovalRecord;
-
-/// Generates an addition record from an item and explicit randomness.
-/// The addition record is itself a commitment to the item.
-pub fn commit(item: Digest, sender_randomness: Digest, receiver_digest: Digest) -> AdditionRecord {
-    let canonical_commitment =
-        Hash::hash_pair(Hash::hash_pair(item, sender_randomness), receiver_digest);
-
-    AdditionRecord::new(canonical_commitment)
-}
-
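Usage of `commit`, as it appears throughout the tests in this patch (the receiver digest is the hash of the receiver preimage):

    // let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
    // let addition_record = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
    //
    // addition_record.canonical_commitment is what `add` appends to the AOCL;
    // on its own it reveals nothing about the item or the randomness.
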
-pub trait MutatorSet {
-    /// Generates a membership proof that will be valid when the item
-    /// is added to the mutator set.
-    fn prove(
-        &mut self,
-        item: Digest,
-        sender_randomness: Digest,
-        receiver_preimage: Digest,
-    ) -> MsMembershipProof;
-
-    fn verify(&self, item: Digest, membership_proof: &MsMembershipProof) -> bool;
-
-    /// Generates a removal record with which to update the set commitment.
-    fn drop(&self, item: Digest, membership_proof: &MsMembershipProof) -> RemovalRecord;
-
-    /// Updates the set-commitment with an addition record.
-    fn add(&mut self, addition_record: &AdditionRecord);
-
-    /// Updates the mutator set so as to remove the item determined by
-    /// its removal record.
-    fn remove(&mut self, removal_record: &RemovalRecord);
-
-    /// Apply multiple removal records, and update a list of membership proofs to
-    /// be valid after the application of these removal records.
-    fn batch_remove(
-        &mut self,
-        removal_records: Vec<RemovalRecord>,
-        preserved_membership_proofs: &mut [&mut MsMembershipProof],
-    );
-
-    /// Return a single hash digest that commits to the entire mutator set
-    fn hash(&self) -> Digest;
-}
diff --git a/src/util_types/mutator_set/removal_record.rs b/src/util_types/mutator_set/removal_record.rs
index 08102ce7..6b36a98f 100644
--- a/src/util_types/mutator_set/removal_record.rs
+++ b/src/util_types/mutator_set/removal_record.rs
@@ -19,7 +19,7 @@ use twenty_first::shared_math::tip5::Digest;
 use twenty_first::util_types::algebraic_hasher::AlgebraicHasher;
 
 use super::chunk_dictionary::{pseudorandom_chunk_dictionary, ChunkDictionary};
-use super::mutator_set_kernel::MutatorSetKernel;
+use super::mutator_set_accumulator::MutatorSetAccumulator;
 use super::shared::{
     get_batch_mutation_argument_for_removal_record, indices_to_hash_map, BATCH_SIZE, CHUNK_SIZE,
     NUM_TRIALS,
@@ -136,17 +136,17 @@ pub struct RemovalRecord {
 }
 
 impl RemovalRecord {
-    /// Update a batch of removal records that are synced to a given mutator set, given
-    /// that that mutator set will be updated with an addition. (The addition record
+    /// Update a batch of removal records that are synced to a given mutator set, in anticipation
+    /// of one addition to that mutator set. (The addition record
     /// does not matter; all necessary information is in the mutator set.)
-    pub fn batch_update_from_addition<MMR: Mmr<Hash>>(
+    pub fn batch_update_from_addition(
         removal_records: &mut [&mut Self],
-        mutator_set: &mut MutatorSetKernel<MMR>,
+        mutator_set: &MutatorSetAccumulator,
     ) {
         let new_item_index = mutator_set.aocl.count_leaves();
 
         // if window does not slide, do nothing
-        if !MutatorSetKernel::<MMR>::window_slides(new_item_index) {
+        if !MutatorSetAccumulator::window_slides(new_item_index) {
             return;
         }
 
@@ -278,10 +278,7 @@
     }
 
     /// Validates that a removal record is synchronized against the inactive part of the SWBF
-    pub fn validate<M>(&self, mutator_set: &MutatorSetKernel<M>) -> bool
-    where
-        M: Mmr<Hash>,
-    {
+    pub fn validate(&self, mutator_set: &MutatorSetAccumulator) -> bool {
         let peaks = mutator_set.swbf_inactive.get_peaks();
         self.target_chunks
             .dictionary
@@ -326,16 +323,16 @@ mod removal_record_tests {
     use rand::{thread_rng, Rng, RngCore};
 
     use crate::util_types::mutator_set::addition_record::AdditionRecord;
+    use crate::util_types::mutator_set::commit;
     use crate::util_types::mutator_set::ms_membership_proof::MsMembershipProof;
     use crate::util_types::mutator_set::mutator_set_accumulator::MutatorSetAccumulator;
-    use crate::util_types::mutator_set::mutator_set_trait::{commit, MutatorSet};
     use crate::util_types::mutator_set::shared::{CHUNK_SIZE, NUM_TRIALS};
     use crate::util_types::test_shared::mutator_set::*;
 
     use super::*;
 
     fn get_item_mp_and_removal_record() -> (Digest, MsMembershipProof, RemovalRecord) {
-        let mut accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default();
+        let accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default();
         let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
         let mp: MsMembershipProof = accumulator.prove(item, sender_randomness, receiver_preimage);
         let removal_record: RemovalRecord = accumulator.drop(item, &mp);
@@ -475,12 +472,12 @@ mod removal_record_tests {
                 .iter_mut()
                 .map(|x| &mut x.1)
                 .collect::<Vec<_>>(),
-            &mut accumulator.kernel,
+            &accumulator,
         );
         let update_res_mp = MsMembershipProof::batch_update_from_addition(
             &mut mps.iter_mut().collect::<Vec<_>>(),
             &items,
-            &accumulator.kernel,
+            &accumulator,
             &addition_record,
         );
         assert!(
@@ -494,12 +491,12 @@ mod removal_record_tests {
 
         for removal_record in removal_records.iter().map(|x| &x.1) {
             assert!(
-                removal_record.validate(&accumulator.kernel),
+                removal_record.validate(&accumulator),
                 "removal records must validate, i = {}",
                 i
             );
             assert!(
-                accumulator.kernel.can_remove(removal_record),
+                accumulator.can_remove(removal_record),
                 "removal records must return true on `can_remove`, i = {}",
                 i
             );
@@ -519,18 +516,18 @@ mod removal_record_tests {
             removal_records.choose(&mut rand::thread_rng()).unwrap();
         assert!(accumulator.verify(items[*chosen_index], &mps[*chosen_index]));
         assert!(
-            accumulator.kernel.can_remove(random_removal_record),
+            accumulator.can_remove(random_removal_record),
            "removal records must return true on `can_remove`",
         );
         assert!(
-            random_removal_record.validate(&accumulator.kernel),
+            random_removal_record.validate(&accumulator),
            "removal record must have valid MMR MPs"
        );
        accumulator.remove(random_removal_record);
        assert!(!accumulator.verify(items[*chosen_index], &mps[*chosen_index]));
        assert!(
-            !accumulator.kernel.can_remove(random_removal_record),
+            !accumulator.can_remove(random_removal_record),
            "removal records must return false on `can_remove` after removal",
        );
    }
@@ -559,12 +556,12 @@ mod removal_record_tests {
                .iter_mut()
                .map(|x| &mut x.1)
                .collect::<Vec<_>>(),
-            &mut accumulator.kernel,
+            &accumulator,
        );
        let update_res_mp = MsMembershipProof::batch_update_from_addition(
            &mut mps.iter_mut().collect::<Vec<_>>(),
            &items,
-            &accumulator.kernel,
+            &accumulator,
            &addition_record,
        );
        assert!(
@@ -578,12 +575,12 @@ mod removal_record_tests {
 
        for removal_record in removal_records.iter().map(|x| &x.1) {
            assert!(
-                removal_record.validate(&accumulator.kernel),
+                removal_record.validate(&accumulator),
                "removal records must validate, i = {}",
                i
            );
            assert!(
-                accumulator.kernel.can_remove(removal_record),
+                accumulator.can_remove(removal_record),
                "removal records must return true on `can_remove`, i = {}",
                i
            );
@@ -613,11 +610,11 @@ mod removal_record_tests {
 
        for removal_record in removal_records.iter().map(|x| &x.1) {
            assert!(
-                removal_record.validate(&accumulator.kernel),
+                removal_record.validate(&accumulator),
                "removal records must validate, i = {}",
                i
            );
-            assert!(accumulator.kernel.can_remove(removal_record));
+            assert!(accumulator.can_remove(removal_record));
        }
    }
@@ -626,10 +623,8 @@
        assert!(original_first_removal_record
            .as_ref()
            .unwrap()
-            .validate(&accumulator.kernel));
-        assert!(!accumulator
-            .kernel
-            .can_remove(&original_first_removal_record.unwrap()));
+            .validate(&accumulator));
+        assert!(!accumulator.can_remove(&original_first_removal_record.unwrap()));
    }
 
    #[test]
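The tests above drive `batch_update_from_addition` in the same pattern throughout; a condensed sketch of that call sequence, assuming the imports used in this test module:

    // RemovalRecord::batch_update_from_addition(
    //     &mut removal_records.iter_mut().map(|x| &mut x.1).collect::<Vec<_>>(),
    //     &accumulator,
    // );
    // accumulator.add(&addition_record);
    // assert!(removal_records.iter().all(|(_, rr)| rr.validate(&accumulator)));
    //
    // The early return inside batch_update_from_addition means this is a no-op
    // unless the addition slides the window.
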
diff --git a/src/util_types/mutator_set/rusty_archival_mutator_set.rs b/src/util_types/mutator_set/rusty_archival_mutator_set.rs
index 051f3d81..8b7c86b4 100644
--- a/src/util_types/mutator_set/rusty_archival_mutator_set.rs
+++ b/src/util_types/mutator_set/rusty_archival_mutator_set.rs
@@ -1,13 +1,15 @@
+use crate::database::storage::storage_schema::{
+    traits::*, DbtSingleton, DbtVec, RustyKey, RustyValue, SimpleRustyStorage,
+};
+use crate::database::NeptuneLevelDb;
 use crate::prelude::twenty_first;
 use crate::Hash;
-use twenty_first::storage::level_db::DB;
-use twenty_first::storage::storage_schema::{traits::*, DbtSingleton, DbtVec, SimpleRustyStorage};
-use twenty_first::{shared_math::tip5::Digest, util_types::mmr::archival_mmr::ArchivalMmr};
+use twenty_first::shared_math::tip5::Digest;
 
 use super::{
-    active_window::ActiveWindow, archival_mutator_set::ArchivalMutatorSet, chunk::Chunk,
-    mutator_set_kernel::MutatorSetKernel,
+    active_window::ActiveWindow, archival_mmr::ArchivalMmr,
+    archival_mutator_set::ArchivalMutatorSet, chunk::Chunk,
 };
 
 type AmsMmrStorage = DbtVec<Digest>;
@@ -20,28 +22,29 @@ pub struct RustyArchivalMutatorSet {
 }
 
 impl RustyArchivalMutatorSet {
-    pub fn connect(db: DB) -> Self {
+    pub async fn connect(db: NeptuneLevelDb<RustyKey, RustyValue>) -> Self {
         let mut storage = SimpleRustyStorage::new_with_callback(
             db,
             "RustyArchivalMutatorSet-Schema",
             crate::LOG_LOCK_EVENT_CB,
         );
 
-        let aocl = storage.schema.new_vec::<Digest>("aocl");
-        let swbfi = storage.schema.new_vec::<Digest>("swbfi");
-        let chunks = storage.schema.new_vec::<Chunk>("chunks");
-        let active_window = storage.schema.new_singleton::<Vec<u32>>("active_window");
-        let sync_label = storage.schema.new_singleton::<Digest>("sync_label");
-        storage.restore_or_new();
-
-        let kernel = MutatorSetKernel::<ArchivalMmr<Hash, AmsMmrStorage>> {
-            aocl: ArchivalMmr::<Hash, AmsMmrStorage>::new(aocl),
-            swbf_inactive: ArchivalMmr::<Hash, AmsMmrStorage>::new(swbfi),
+        let aocl = storage.schema.new_vec::<Digest>("aocl").await;
+        let swbfi = storage.schema.new_vec::<Digest>("swbfi").await;
+        let chunks = storage.schema.new_vec::<Chunk>("chunks").await;
+        let active_window = storage
+            .schema
+            .new_singleton::<Vec<u32>>("active_window")
+            .await;
+        let sync_label = storage.schema.new_singleton::<Digest>("sync_label").await;
+
+        let ams = ArchivalMutatorSet::<AmsMmrStorage, AmsChunkStorage> {
+            chunks,
+            aocl: ArchivalMmr::<Hash, AmsMmrStorage>::new(aocl).await,
+            swbf_inactive: ArchivalMmr::<Hash, AmsMmrStorage>::new(swbfi).await,
             swbf_active: ActiveWindow::new(),
         };
 
-        let ams = ArchivalMutatorSet::<AmsMmrStorage, AmsChunkStorage> { chunks, kernel };
-
         Self {
             ams,
             storage,
@@ -61,49 +64,47 @@ impl RustyArchivalMutatorSet {
     }
 
     #[inline]
-    pub fn get_sync_label(&self) -> Digest {
-        self.sync_label.get()
+    pub async fn get_sync_label(&self) -> Digest {
+        self.sync_label.get().await
     }
 
     #[inline]
-    pub fn set_sync_label(&mut self, sync_label: Digest) {
-        self.sync_label.set(sync_label);
-    }
-}
-
-impl StorageWriter for RustyArchivalMutatorSet {
-    fn persist(&mut self) {
-        self.active_window_storage
-            .set(self.ams().kernel.swbf_active.sbf.clone());
-
-        self.storage.persist();
+    pub async fn set_sync_label(&mut self, sync_label: Digest) {
+        self.sync_label.set(sync_label).await;
     }
 
-    fn restore_or_new(&mut self) {
-        self.storage.restore_or_new();
-
+    pub async fn restore_or_new(&mut self) {
         // The field `digests` of ArchivalMMR should always have at
         // least one element (a dummy digest), owing to 1-indexation.
-        self.ams_mut().kernel.aocl.fix_dummy();
-        self.ams_mut().kernel.swbf_inactive.fix_dummy();
+        self.ams_mut().aocl.fix_dummy_async().await;
+        self.ams_mut().swbf_inactive.fix_dummy_async().await;
 
         // populate active window
-        self.ams_mut().kernel.swbf_active.sbf = self.active_window_storage.get();
+        self.ams_mut().swbf_active.sbf = self.active_window_storage.get().await;
+    }
+}
+
+impl StorageWriter for RustyArchivalMutatorSet {
+    async fn persist(&mut self) {
+        self.active_window_storage
+            .set(self.ams().swbf_active.sbf.clone())
+            .await;
+
+        self.storage.persist().await;
     }
 }
 
 #[cfg(test)]
 mod tests {
-    use crate::util_types::mutator_set::mutator_set_trait::{commit, MutatorSet};
     use itertools::Itertools;
     use rand::{random, thread_rng, RngCore};
     use twenty_first::shared_math::tip5::Tip5;
 
+    use crate::util_types::mutator_set::commit;
     use crate::util_types::mutator_set::{
         ms_membership_proof::MsMembershipProof, shared::BATCH_SIZE,
     };
     use crate::util_types::test_shared::mutator_set::*;
-    use twenty_first::util_types::mmr::mmr_trait::Mmr;
 
     use super::*;
 
@@ -115,51 +116,53 @@ mod tests {
         let num_removals = 50usize;
         let mut rng = thread_rng();
 
-        let db = DB::open_new_test_database(false, None, None, None).unwrap();
+        let db = NeptuneLevelDb::open_new_test_database(false, None, None, None)
+            .await
+            .unwrap();
         let db_path = db.path().clone();
-        let mut rusty_mutator_set: RustyArchivalMutatorSet = RustyArchivalMutatorSet::connect(db);
+        let mut rusty_mutator_set: RustyArchivalMutatorSet =
+            RustyArchivalMutatorSet::connect(db).await;
         println!("Connected to database");
-        rusty_mutator_set.restore_or_new();
-        println!("Restored or new odne.");
+        rusty_mutator_set.restore_or_new().await;
+        println!("Restored or new done.");
 
         let mut items = vec![];
         let mut mps = vec![];
 
         println!(
             "before additions mutator set contains {} elements",
-            rusty_mutator_set.ams().kernel.aocl.count_leaves()
+            rusty_mutator_set.ams().aocl.count_leaves().await
        );
 
        for _ in 0..num_additions {
            let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
            let addition_record = commit(item, sender_randomness, receiver_preimage.hash::<Hash>());
-            let mp =
-                rusty_mutator_set
-                    .ams()
-                    .kernel
-                    .prove(item, sender_randomness, receiver_preimage);
+            let mp = rusty_mutator_set
+                .ams()
+                .prove(item, sender_randomness, receiver_preimage)
+                .await;
 
            MsMembershipProof::batch_update_from_addition(
                &mut mps.iter_mut().collect_vec(),
                &items,
-                &rusty_mutator_set.ams().kernel,
+                &rusty_mutator_set.ams().accumulator().await,
                &addition_record,
            )
            .expect("Cannot batch update from addition");
 
            mps.push(mp);
            items.push(item);
-            rusty_mutator_set.ams_mut().add(&addition_record);
+            rusty_mutator_set.ams_mut().add(&addition_record).await;
        }
 
        println!(
            "after additions mutator set contains {} elements",
-            rusty_mutator_set.ams().kernel.aocl.count_leaves()
+            rusty_mutator_set.ams().aocl.count_leaves().await
        );
 
        // Verify membership
        for (mp, &item) in mps.iter().zip(items.iter()) {
-            assert!(rusty_mutator_set.ams().verify(item, mp));
+            assert!(rusty_mutator_set.ams().verify(item, mp).await);
        }
 
        // Remove items
@@ -171,15 +174,15 @@ mod tests {
            let membership_proof = mps[index].clone();
            let removal_record = rusty_mutator_set
                .ams_mut()
-                .kernel
-                .drop(item, &membership_proof);
+                .drop(item, &membership_proof)
+                .await;
            MsMembershipProof::batch_update_from_remove(
                &mut mps.iter_mut().collect_vec(),
                &removal_record,
            )
            .expect("Could not batch update membership proofs from remove");
 
-            rusty_mutator_set.ams_mut().remove(&removal_record);
+
            rusty_mutator_set.ams_mut().remove(&removal_record).await;
 
            removed_items.push(items.remove(index));
            removed_mps.push(mps.remove(index));
        }
@@ -189,35 +192,36 @@ mod tests {
        // a new archival object from the databases it contains and then check
        // that this archival MS contains the same values
        let sync_label: Digest = random();
-        rusty_mutator_set.set_sync_label(sync_label);
+        rusty_mutator_set.set_sync_label(sync_label).await;
 
        println!(
            "at persistence mutator set aocl contains {} elements",
-            rusty_mutator_set.ams().kernel.aocl.count_leaves()
+            rusty_mutator_set.ams().aocl.count_leaves().await
        );
 
        // persist and drop
-        rusty_mutator_set.persist();
+        rusty_mutator_set.persist().await;
 
-        let active_window_before = rusty_mutator_set.ams().kernel.swbf_active.clone();
+        let active_window_before = rusty_mutator_set.ams().swbf_active.clone();
 
        drop(rusty_mutator_set); // Drop DB
 
        // new database
-        let new_db = DB::open_test_database(&db_path, true, None, None, None)
+        let new_db = NeptuneLevelDb::open_test_database(&db_path, true, None, None, None)
+            .await
            .expect("should open existing database");
        let mut new_rusty_mutator_set: RustyArchivalMutatorSet =
-            RustyArchivalMutatorSet::connect(new_db);
-        new_rusty_mutator_set.restore_or_new();
+            RustyArchivalMutatorSet::connect(new_db).await;
+        new_rusty_mutator_set.restore_or_new().await;
 
        // Verify memberships
        println!(
            "restored mutator set contains {} elements",
-            new_rusty_mutator_set.ams().kernel.aocl.count_leaves()
+            new_rusty_mutator_set.ams().aocl.count_leaves().await
        );
        for (index, (mp, &item)) in mps.iter().zip(items.iter()).enumerate() {
            assert!(
-                new_rusty_mutator_set.ams().verify(item, mp),
+                new_rusty_mutator_set.ams().verify(item, mp).await,
                "membership proof {index} does not verify"
            );
        }
@@ -225,15 +229,15 @@ mod tests {
        // Verify non-membership
        for (index, (mp, &item)) in removed_mps.iter().zip(removed_items.iter()).enumerate() {
            assert!(
-                !new_rusty_mutator_set.ams().verify(item, mp),
+                !new_rusty_mutator_set.ams().verify(item, mp).await,
                "membership proof of non-member {index} still valid"
            );
        }
 
-        let retrieved_sync_label = new_rusty_mutator_set.get_sync_label();
+        let retrieved_sync_label = new_rusty_mutator_set.get_sync_label().await;
        assert_eq!(sync_label, retrieved_sync_label);
 
-        let active_window_after = new_rusty_mutator_set.ams().kernel.swbf_active.clone();
+        let active_window_after = new_rusty_mutator_set.ams().swbf_active.clone();
 
        assert_eq!(active_window_before, active_window_after);
    }
diff --git a/src/util_types/sync/mod.rs b/src/util_types/sync/mod.rs
deleted file mode 100644
index e905b092..00000000
--- a/src/util_types/sync/mod.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-//! Provides sync helpers for sharing data between threads
-pub mod tokio;
diff --git a/src/util_types/test_shared/mutator_set.rs b/src/util_types/test_shared/mutator_set.rs
index 6f877595..58712a47 100644
--- a/src/util_types/test_shared/mutator_set.rs
+++ b/src/util_types/test_shared/mutator_set.rs
@@ -1,4 +1,5 @@
 use crate::prelude::twenty_first;
+use crate::util_types::mutator_set::commit;
 
 use std::collections::HashMap;
 use std::marker::PhantomData;
@@ -7,15 +8,15 @@
 use itertools::Itertools;
 use rand::rngs::StdRng;
 use rand::{thread_rng, Rng, RngCore, SeedableRng};
 
+use crate::database::storage::storage_vec::traits::*;
+use crate::database::NeptuneLevelDb;
 use twenty_first::shared_math::other::{log_2_ceil, log_2_floor};
 use twenty_first::shared_math::tip5::Digest;
-use twenty_first::storage::level_db::DB;
 use twenty_first::util_types::algebraic_hasher::AlgebraicHasher;
 use twenty_first::util_types::mmr::mmr_accumulator::MmrAccumulator;
 use twenty_first::util_types::mmr::mmr_membership_proof::MmrMembershipProof;
 use twenty_first::util_types::mmr::mmr_trait::Mmr;
 use twenty_first::util_types::mmr::shared_basic::leaf_index_to_mt_index_and_peak_index;
-use twenty_first::util_types::storage_vec::StorageVec;
 
 use crate::util_types::mutator_set::active_window::ActiveWindow;
 use crate::util_types::mutator_set::archival_mutator_set::ArchivalMutatorSet;
@@ -27,8 +28,6 @@ use crate::util_types::mutator_set::ms_membership_proof::{
     pseudorandom_mmr_membership_proof, pseudorandom_mutator_set_membership_proof, MsMembershipProof,
 };
 use crate::util_types::mutator_set::mutator_set_accumulator::MutatorSetAccumulator;
-use crate::util_types::mutator_set::mutator_set_kernel::MutatorSetKernel;
-use crate::util_types::mutator_set::mutator_set_trait::commit;
 use crate::util_types::mutator_set::removal_record::{pseudorandom_removal_record, RemovalRecord};
 use crate::util_types::mutator_set::rusty_archival_mutator_set::RustyArchivalMutatorSet;
 use crate::util_types::mutator_set::shared::{CHUNK_SIZE, WINDOW_SIZE};
@@ -39,21 +38,21 @@ pub fn random_chunk_dictionary() -> ChunkDictionary {
     pseudorandom_chunk_dictionary(rng.gen::<[u8; 32]>())
 }
 
-pub fn get_all_indices_with_duplicates<
-    MmrStorage: StorageVec<Digest>,
-    ChunkStorage: StorageVec<Chunk>,
+pub async fn get_all_indices_with_duplicates<
+    MmrStorage: StorageVec<Digest> + Send + Sync,
+    ChunkStorage: StorageVec<Chunk> + Send + Sync,
 >(
     archival_mutator_set: &mut ArchivalMutatorSet<MmrStorage, ChunkStorage>,
 ) -> Vec<u128> {
     let mut ret: Vec<u128> = vec![];
-    for index in archival_mutator_set.kernel.swbf_active.sbf.iter() {
+    for index in archival_mutator_set.swbf_active.sbf.iter() {
         ret.push(*index as u128);
     }
 
-    let chunk_count = archival_mutator_set.chunks.len();
+    let chunk_count = archival_mutator_set.chunks.len().await;
     for chunk_index in 0..chunk_count {
-        let chunk = archival_mutator_set.chunks.get(chunk_index);
+        let chunk = archival_mutator_set.chunks.get(chunk_index).await;
         for index in chunk.relative_indices.iter() {
            ret.push(*index as u128 + CHUNK_SIZE as u128 * chunk_index as u128);
        }
@@ -71,15 +70,15 @@ pub fn make_item_and_randomnesses() -> (Digest, Digest, Digest) {
 }
 
 #[allow(clippy::type_complexity)]
-pub fn empty_rusty_mutator_set() -> RustyArchivalMutatorSet {
-    let db = DB::open_new_test_database(true, None, None, None).unwrap();
-    let rusty_mutator_set: RustyArchivalMutatorSet = RustyArchivalMutatorSet::connect(db);
+pub async fn empty_rusty_mutator_set() -> RustyArchivalMutatorSet {
+    let db = NeptuneLevelDb::open_new_test_database(true, None, None, None)
+        .await
+        .unwrap();
+    let rusty_mutator_set: RustyArchivalMutatorSet = RustyArchivalMutatorSet::connect(db).await;
 
     rusty_mutator_set
 }
 
-pub fn insert_mock_item<MMR: Mmr<Hash>>(
-    mutator_set: &mut MutatorSetKernel<MMR>,
-) -> (MsMembershipProof, Digest) {
+pub fn insert_mock_item(mutator_set: &mut MutatorSetAccumulator) -> (MsMembershipProof, Digest) {
     let (new_item, sender_randomness, receiver_preimage) = make_item_and_randomnesses();
 
     let addition_record = commit(
@@ -93,8 +92,8 @@
     (membership_proof, new_item)
 }
 
-pub fn remove_mock_item<MMR: Mmr<Hash>>(
-    mutator_set: &mut MutatorSetKernel<MMR>,
+pub fn remove_mock_item(
+    mutator_set: &mut MutatorSetAccumulator,
     item: Digest,
     mp: &MsMembershipProof,
 ) {
@@ -104,16 +103,10 @@
 
 /// Generate a random MSA. For serialization testing. Might not be a consistent or valid object.
 pub fn random_mutator_set_accumulator() -> MutatorSetAccumulator {
-    let kernel = random_mutator_set_kernel();
-    MutatorSetAccumulator { kernel }
-}
-
-/// Generate a random MSK. For serialization testing. Might not be a consistent or valid object.
-pub fn random_mutator_set_kernel() -> MutatorSetKernel<MmrAccumulator<Hash>> {
     let aocl = random_mmra();
     let swbf_inactive = random_mmra();
     let swbf_active = random_swbf_active();
-    MutatorSetKernel {
+    MutatorSetAccumulator {
         aocl,
         swbf_inactive,
        swbf_active,
@@ -394,16 +387,16 @@ mod shared_tests_test {
 
     use super::*;
 
-    #[test]
-    fn can_call() {
+    #[tokio::test]
+    async fn can_call() {
         let rcd = random_chunk_dictionary();
         assert!(!rcd.dictionary.is_empty());
         let _ = random_removal_record();
-        let mut rms = empty_rusty_mutator_set();
+        let mut rms = empty_rusty_mutator_set().await;
         let ams = rms.ams_mut();
-        let _ = get_all_indices_with_duplicates(ams);
+        let _ = get_all_indices_with_duplicates(ams).await;
         let _ = make_item_and_randomnesses();
-        let _ = insert_mock_item(&mut ams.kernel);
+        let _ = insert_mock_item(&mut ams.accumulator().await);
     }
 
     #[test]