diff --git a/.changelog/v0.12.1/bug-fixes/942-vp-verify-masp-failure.md b/.changelog/v0.12.1/bug-fixes/942-vp-verify-masp-failure.md
new file mode 100644
index 0000000000..4c3cfdee3c
--- /dev/null
+++ b/.changelog/v0.12.1/bug-fixes/942-vp-verify-masp-failure.md
@@ -0,0 +1,2 @@
+- Avoid panicking unwrap()s in vp_verify_masp, to prevent crashing the node on
+  malformed transactions. ([#942](https://github.com/anoma/namada/pull/942))
\ No newline at end of file
diff --git a/.changelog/v0.12.1/summary.md b/.changelog/v0.12.1/summary.md
new file mode 100644
index 0000000000..330b094aec
--- /dev/null
+++ b/.changelog/v0.12.1/summary.md
@@ -0,0 +1,2 @@
+Namada 0.12.1 is a hotfix release, fixing a node crash on malformed
+transactions to the MASP.
diff --git a/.changelog/v0.6.0/improvements/1088-updating-to-latest-abcipp.md b/.changelog/v0.6.0/improvements/1088-updating-to-latest-abcipp.md
new file mode 100644
index 0000000000..3a65bc82cd
--- /dev/null
+++ b/.changelog/v0.6.0/improvements/1088-updating-to-latest-abcipp.md
@@ -0,0 +1,14 @@
+- Ledger: Updated the version of Tendermint used for ABCI++ ([#1088](https://github.com/anoma/anoma/pull/1088))
+  - Added full support for ProcessProposal and FinalizeBlock
+  - Updated the shims
+  - Updated `tendermint-rs`, `ibc-rs`, and `tower-abci` deps
+  - Updated the proto definitions
+  - Added Tendermint's new BFT timestamping method
+  - Updated the format of Tendermint's new config
+  - Fixed booting up the Tendermint node in the ledger with the correct settings
+  - Refactored storage to account for the fact that Tendermint no longer passes in block headers
+- Client: Configured Tendermint's new event log and JSON-RPC API for event queries ([#1088](https://github.com/anoma/anoma/pull/1088))
+  - Added necessary config parameters to our Tendermint node's configuration
+  - Wrote a JSON-RPC client for querying Tendermint's event logs
+  - Refactored how txs are submitted in the client to use JSON-RPC calls instead
+    of websockets when the `ABCI-plus-plus` feature is set.
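The vp_verify_masp fix described above amounts to replacing panicking `unwrap()`s on untrusted transaction data with explicit rejection. A minimal sketch of that pattern, assuming a Borsh-encoded payload (the `Transfer` type and the final check are hypothetical stand-ins, not the actual Namada code):

```rust
use borsh::BorshDeserialize;

// Hypothetical stand-in for the Borsh-encoded payload a MASP VP decodes.
#[derive(BorshDeserialize)]
struct Transfer {
    amount: u64,
}

// Decoding untrusted bytes must not unwrap(): a malformed payload is a
// verification failure, not a reason to panic the node.
fn vp_verify_masp_sketch(tx_data: &[u8]) -> bool {
    match Transfer::try_from_slice(tx_data) {
        // Only a well-formed payload proceeds to the real checks.
        Ok(transfer) => transfer.amount > 0,
        // Before the fix, an unwrap() here could crash the node; returning
        // false keeps the failure inside normal VP semantics, so the
        // transaction is simply rejected and the node keeps running.
        Err(_) => false,
    }
}
```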
diff --git a/.github/workflows/build-and-test-bridge.yml b/.github/workflows/build-and-test-bridge.yml index 6470c61698..1e0827f724 100644 --- a/.github/workflows/build-and-test-bridge.yml +++ b/.github/workflows/build-and-test-bridge.yml @@ -151,7 +151,7 @@ jobs: suffix: '' cache_key: namada cache_version: v2 - tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377 + tendermint_artifact: tendermint-unreleased-v0.1.4-abciplus env: CARGO_INCREMENTAL: 0 @@ -357,14 +357,14 @@ jobs: index: 0 cache_key: namada cache_version: v2 - tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377 + tendermint_artifact: tendermint-unreleased-v0.1.4-abciplus wait_for: namada-release-eth (ubuntu-20.04, 1.7.0, ABCI Release build, namada-e2e-release, v2) - name: e2e suffix: '' index: 1 cache_key: namada cache_version: v2 - tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377 + tendermint_artifact: tendermint-unreleased-v0.1.4-abciplus wait_for: namada-release-eth (ubuntu-20.04, 1.7.0, ABCI Release build, namada-e2e-release, v2) env: diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 0789e8f880..ae3be0fb0b 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -153,7 +153,7 @@ jobs: suffix: '' cache_key: namada cache_version: v2 - tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377 + tendermint_artifact: tendermint-unreleased-v0.1.4-abciplus env: CARGO_INCREMENTAL: 0 @@ -359,14 +359,14 @@ jobs: index: 0 cache_key: namada cache_version: v2 - tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377 + tendermint_artifact: tendermint-unreleased-v0.1.4-abciplus wait_for: namada-release (ubuntu-20.04, 1.7.0, ABCI Release build, namada-e2e-release, v2) - name: e2e suffix: '' index: 1 cache_key: namada cache_version: v2 - tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377 + tendermint_artifact: tendermint-unreleased-v0.1.4-abciplus wait_for: namada-release (ubuntu-20.04, 1.7.0, ABCI Release build, namada-e2e-release, v2) env: diff --git a/.github/workflows/build-tendermint.yml b/.github/workflows/build-tendermint.yml index 1e78f1d986..d7977a4b3b 100644 --- a/.github/workflows/build-tendermint.yml +++ b/.github/workflows/build-tendermint.yml @@ -23,7 +23,7 @@ jobs: make: - name: tendermint-unreleased repository: heliaxdev/tendermint - tendermint_version: ad825dcadbd4b98c3f91ce5a711e4fb36a69c377 + tendermint_version: v0.1.4-abciplus steps: - name: Build ${{ matrix.make.name }} diff --git a/.github/workflows/scripts/e2e.json b/.github/workflows/scripts/e2e.json index 8ee62bb236..9fcc4c0a7a 100644 --- a/.github/workflows/scripts/e2e.json +++ b/.github/workflows/scripts/e2e.json @@ -1,6 +1,7 @@ { "e2e::eth_bridge_tests::everything": 4, "e2e::ibc_tests::run_ledger_ibc": 140, + "e2e::eth_bridge_tests::test_add_to_bridge_pool": 10, "e2e::ledger_tests::double_signing_gets_slashed": 12, "e2e::ledger_tests::invalid_transactions": 8, "e2e::ledger_tests::ledger_many_txs_in_a_block": 41, @@ -21,4 +22,4 @@ "e2e::wallet_tests::wallet_encrypted_key_cmds": 1, "e2e::wallet_tests::wallet_encrypted_key_cmds_env_var": 1, "e2e::wallet_tests::wallet_unencrypted_key_cmds": 1 -} \ No newline at end of file +} diff --git a/CHANGELOG.md b/CHANGELOG.md index 9aaab24c50..c43936dd31 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # CHANGELOG +## v0.12.1 + +Namada 0.12.1 is a 
hotfix release, fixing a node crash on malformed +transactions to the MASP. + +### BUG FIXES + +- Avoid panicking unwrap()s in vp_verify_masp, to prevent crashing the node on + malformed transactions. ([#942](https://github.com/anoma/namada/pull/942)) + ## v0.12.0 Namada 0.12.0 is a scheduled minor release. diff --git a/Cargo.lock b/Cargo.lock index 1a0a9ae50b..971f21793c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,109 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "actix-codec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a7559404a7f3573127aab53c08ce37a6c6a315c374a31070f3c91cd1b4a7fe" +dependencies = [ + "bitflags", + "bytes 1.2.1", + "futures-core", + "futures-sink", + "log 0.4.17", + "memchr", + "pin-project-lite", + "tokio", + "tokio-util 0.7.4", +] + +[[package]] +name = "actix-http" +version = "3.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c83abf9903e1f0ad9973cc4f7b9767fd5a03a583f51a5b7a339e07987cd2724" +dependencies = [ + "actix-codec", + "actix-rt", + "actix-service", + "actix-utils", + "ahash", + "base64 0.13.1", + "bitflags", + "bytes 1.2.1", + "bytestring", + "derive_more", + "encoding_rs", + "flate2", + "futures-core", + "h2", + "http", + "httparse", + "httpdate", + "itoa", + "language-tags 0.3.2", + "local-channel", + "mime 0.3.16", + "percent-encoding 2.2.0", + "pin-project-lite", + "rand 0.8.5", + "sha1", + "smallvec 1.10.0", + "tracing 0.1.37", + "zstd", +] + +[[package]] +name = "actix-rt" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ea16c295198e958ef31930a6ef37d0fb64e9ca3b6116e6b93a8bdae96ee1000" +dependencies = [ + "futures-core", + "tokio", +] + +[[package]] +name = "actix-service" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b894941f818cfdc7ccc4b9e60fa7e53b5042a2e8567270f9147d5591893373a" +dependencies = [ + "futures-core", + "paste", + "pin-project-lite", +] + +[[package]] +name = "actix-tls" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fde0cf292f7cdc7f070803cb9a0d45c018441321a78b1042ffbbb81ec333297" +dependencies = [ + "actix-codec", + "actix-rt", + "actix-service", + "actix-utils", + "futures-core", + "http", + "log 0.4.17", + "openssl", + "pin-project-lite", + "tokio-openssl", + "tokio-util 0.7.4", +] + +[[package]] +name = "actix-utils" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88a1dcdff1466e3c2488e1cb5c36a71822750ad43839937f85d2f4d9f8b705d8" +dependencies = [ + "local-waker", + "pin-project-lite", +] + [[package]] name = "addr2line" version = "0.17.0" @@ -130,7 +233,7 @@ dependencies = [ "ark-serialize", "ark-std", "derivative", - "num-bigint", + "num-bigint 0.4.3", "num-traits 0.2.15", "paste", "rustc_version 0.3.3", @@ -153,7 +256,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" dependencies = [ - "num-bigint", + "num-bigint 0.4.3", "num-traits 0.2.15", "quote", "syn", @@ -414,7 +517,7 @@ dependencies = [ "pin-project-lite", "tokio", "tokio-rustls", - "tungstenite", + "tungstenite 0.12.0", "webpki-roots 0.21.1", ] @@ -450,6 +553,40 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" 
+[[package]] +name = "awc" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80ca7ff88063086d2e2c70b9f3b29b2fcd999bac68ac21731e66781970d68519" +dependencies = [ + "actix-codec", + "actix-http", + "actix-rt", + "actix-service", + "actix-tls", + "actix-utils", + "ahash", + "base64 0.13.1", + "bytes 1.2.1", + "cfg-if 1.0.0", + "derive_more", + "futures-core", + "futures-util", + "h2", + "http", + "itoa", + "log 0.4.17", + "mime 0.3.16", + "openssl", + "percent-encoding 2.2.0", + "pin-project-lite", + "rand 0.8.5", + "serde 1.0.147", + "serde_json", + "serde_urlencoded", + "tokio", +] + [[package]] name = "backtrace" version = "0.3.66" @@ -514,7 +651,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43473b34abc4b0b405efa0a250bac87eea888182b21687ee5c8115d279b0fda5" dependencies = [ - "bitvec", + "bitvec 0.22.3", "blake2s_simd 0.5.11", "byteorder", "crossbeam-channel 0.5.6", @@ -638,10 +775,22 @@ version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5237f00a8c86130a0cc317830e558b966dd7850d48a953d998c813f01a41b527" dependencies = [ - "funty", - "radium", + "funty 1.2.0", + "radium 0.6.2", + "tap", + "wyz 0.4.0", +] + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty 2.0.0", + "radium 0.7.0", "tap", - "wyz", + "wyz 0.5.1", ] [[package]] @@ -820,7 +969,7 @@ source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", - "proc-macro-crate", + "proc-macro-crate 0.1.5", "proc-macro2", "syn", ] @@ -856,12 +1005,28 @@ dependencies = [ "regex-automata", ] +[[package]] +name = "buf_redux" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" +dependencies = [ + "memchr", + "safemem", +] + [[package]] name = "bumpalo" version = "3.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +[[package]] +name = "byte-slice-cast" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" + [[package]] name = "byte-tools" version = "0.3.1" @@ -927,6 +1092,15 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" +[[package]] +name = "bytestring" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7f83e57d9154148e355404702e2694463241880b939570d7c97c014da7a69a1" +dependencies = [ + "bytes 1.2.1", +] + [[package]] name = "bzip2-sys" version = "0.1.11+1.0.8" @@ -1094,6 +1268,24 @@ dependencies = [ "vec_map", ] +[[package]] +name = "clarity" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "880114aafee14fa3a183582a82407474d53f4950b1695658e95bbb5d049bb253" +dependencies = [ + "lazy_static", + "num-bigint 0.4.3", + "num-traits 0.2.15", + "num256", + "secp256k1 0.24.1", + "serde 1.0.147", + "serde-rlp", + "serde_bytes", + "serde_derive", + "sha3 0.10.6", +] + [[package]] name = "cloudabi" version = "0.0.3" @@ -1221,6 
+1413,12 @@ dependencies = [ "syn", ] +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "core-foundation" version = "0.9.3" @@ -1630,8 +1828,10 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ + "convert_case", "proc-macro2", "quote", + "rustc_version 0.4.0", "syn", ] @@ -1891,6 +2091,16 @@ dependencies = [ "byteorder", ] +[[package]] +name = "error" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6e606f14042bb87cc02ef6a14db6c90ab92ed6f62d87e69377bc759fd7987cc" +dependencies = [ + "traitobject", + "typeable", +] + [[package]] name = "error-chain" version = "0.12.4" @@ -1912,6 +2122,50 @@ dependencies = [ "serde_json", ] +[[package]] +name = "ethabi" +version = "17.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4966fba78396ff92db3b817ee71143eccd98acf0f876b8d600e585a670c5d1b" +dependencies = [ + "ethereum-types", + "hex", + "once_cell", + "regex", + "serde 1.0.147", + "serde_json", + "sha3 0.10.6", + "thiserror", + "uint", +] + +[[package]] +name = "ethbloom" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11da94e443c60508eb62cf256243a64da87304c2802ac2528847f79d750007ef" +dependencies = [ + "crunchy 0.2.2", + "fixed-hash", + "impl-rlp", + "impl-serde", + "tiny-keccak", +] + +[[package]] +name = "ethereum-types" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2827b94c556145446fcce834ca86b7abf0c39a805883fe20e72c5bfdb5a0dc6" +dependencies = [ + "ethbloom", + "fixed-hash", + "impl-rlp", + "impl-serde", + "primitive-types", + "uint", +] + [[package]] name = "event-listener" version = "2.5.3" @@ -1964,7 +2218,7 @@ dependencies = [ [[package]] name = "ferveo" version = "0.1.1" -source = "git+https://github.com/anoma/ferveo#1022ab2c7ccc689abcc05e5a08df6fb0c2a3fc65" +source = "git+https://github.com/anoma/ferveo?rev=9e5e91c954158e7cff45c483fd06cd649a81553f#9e5e91c954158e7cff45c483fd06cd649a81553f" dependencies = [ "anyhow", "ark-bls12-381", @@ -1987,7 +2241,7 @@ dependencies = [ "itertools", "measure_time", "miracl_core", - "num", + "num 0.4.0", "rand 0.7.3", "rand 0.8.5", "serde 1.0.147", @@ -2001,7 +2255,7 @@ dependencies = [ [[package]] name = "ferveo-common" version = "0.1.0" -source = "git+https://github.com/anoma/ferveo#1022ab2c7ccc689abcc05e5a08df6fb0c2a3fc65" +source = "git+https://github.com/anoma/ferveo?rev=9e5e91c954158e7cff45c483fd06cd649a81553f#9e5e91c954158e7cff45c483fd06cd649a81553f" dependencies = [ "anyhow", "ark-ec", @@ -2017,7 +2271,7 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "131655483be284720a17d74ff97592b8e76576dc25563148601df2d7c9080924" dependencies = [ - "bitvec", + "bitvec 0.22.3", "rand_core 0.6.4", "subtle", ] @@ -2057,6 +2311,18 @@ dependencies = [ "windows-sys 0.42.0", ] +[[package]] +name = "fixed-hash" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + [[package]] name = "fixedbitset" version = "0.4.2" @@ -2122,7 +2388,7 @@ 
dependencies = [ "block-modes", "cipher", "libm", - "num-bigint", + "num-bigint 0.4.3", "num-integer", "num-traits 0.2.15", ] @@ -2160,6 +2426,12 @@ name = "funty" version = "1.2.0" source = "git+https://github.com/bitvecto-rs/funty/?rev=7ef0d890fbcd8b3def1635ac1a877fc298488446#7ef0d890fbcd8b3def1635ac1a877fc298488446" +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + [[package]] name = "futures" version = "0.1.31" @@ -2378,7 +2650,7 @@ dependencies = [ [[package]] name = "group-threshold-cryptography" version = "0.1.0" -source = "git+https://github.com/anoma/ferveo#1022ab2c7ccc689abcc05e5a08df6fb0c2a3fc65" +source = "git+https://github.com/anoma/ferveo?rev=9e5e91c954158e7cff45c483fd06cd649a81553f#9e5e91c954158e7cff45c483fd06cd649a81553f" dependencies = [ "anyhow", "ark-bls12-381", @@ -2529,6 +2801,12 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "heck" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" + [[package]] name = "hermit-abi" version = "0.1.19" @@ -2633,7 +2911,7 @@ checksum = "0a0652d9a2609a968c14be1a9ea00bf4b1d64e2e1f53a1b51b6fff3a6e829273" dependencies = [ "base64 0.9.3", "httparse", - "language-tags", + "language-tags 0.2.2", "log 0.3.9", "mime 0.2.6", "num_cpus", @@ -2860,8 +3138,8 @@ dependencies = [ "k256", "moka", "nanoid", - "num-bigint", - "num-rational", + "num-bigint 0.4.3", + "num-rational 0.4.1", "prost", "prost-types", "regex", @@ -2901,7 +3179,7 @@ dependencies = [ "prost", "ripemd160", "sha2 0.9.9", - "sha3", + "sha3 0.9.1", "sp-std", ] @@ -2932,6 +3210,44 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-rlp" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" +dependencies = [ + "rlp", +] + +[[package]] +name = "impl-serde" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" +dependencies = [ + "serde 1.0.147", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "incrementalmerkletree" version = "0.2.0" @@ -2988,6 +3304,12 @@ dependencies = [ "web-sys", ] +[[package]] +name = "integer-encoding" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" + [[package]] name = "iovec" version = "0.1.4" @@ -3042,7 +3364,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7baec19d4e83f9145d4891178101a604565edff9645770fc979804138b04c" dependencies = [ - "bitvec", + "bitvec 0.22.3", "bls12_381", "ff", "group", @@ -3094,6 +3416,12 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" +[[package]] +name = "language-tags" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388" + [[package]] name = "lazy_static" version = "1.4.0" @@ -3265,6 +3593,24 @@ dependencies = [ "serde 1.0.147", ] +[[package]] +name = "local-channel" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f303ec0e94c6c54447f84f3b0ef7af769858a9c4ef56ef2a986d3dcd4c3fc9c" +dependencies = [ + "futures-core", + "futures-sink", + "futures-util", + "local-waker", +] + +[[package]] +name = "local-waker" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e34f76eb3611940e0e7d53a9aaa4e6a3151f69541a282fd0dad5571420c53ff1" + [[package]] name = "lock_api" version = "0.3.4" @@ -3354,7 +3700,7 @@ source = "git+https://github.com/anoma/masp?rev=bee40fc465f6afbd10558d12fe96eb17 dependencies = [ "aes", "bip0039", - "bitvec", + "bitvec 0.22.3", "blake2b_simd 1.0.0", "blake2s_simd 1.0.0", "bls12_381", @@ -3488,6 +3834,24 @@ dependencies = [ "zeroize", ] +[[package]] +name = "message-io" +version = "0.14.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eee18ff0c94dec5f2da5faa939b3b40122c9c38ff6d934d0917b5313ddc7b5e4" +dependencies = [ + "crossbeam-channel 0.5.6", + "crossbeam-utils 0.8.12", + "integer-encoding", + "lazy_static", + "log 0.4.17", + "mio 0.7.14", + "serde 1.0.147", + "strum", + "tungstenite 0.16.0", + "url 2.3.1", +] + [[package]] name = "mime" version = "0.2.6" @@ -3554,12 +3918,25 @@ dependencies = [ "kernel32-sys", "libc", "log 0.4.17", - "miow", + "miow 0.2.2", "net2", "slab", "winapi 0.2.8", ] +[[package]] +name = "mio" +version = "0.7.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" +dependencies = [ + "libc", + "log 0.4.17", + "miow 0.3.7", + "ntapi", + "winapi 0.3.9", +] + [[package]] name = "mio" version = "0.8.5" @@ -3584,6 +3961,15 @@ dependencies = [ "ws2_32-sys", ] +[[package]] +name = "miow" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "miracl_core" version = "2.3.0" @@ -3633,9 +4019,27 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" +[[package]] +name = "multipart" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" +dependencies = [ + "buf_redux", + "httparse", + "log 0.4.17", + "mime 0.3.16", + "mime_guess", + "quick-error 1.2.3", + "rand 0.8.5", + "safemem", + "tempfile", + "twoway", +] + [[package]] name = "namada" -version = "0.12.0" +version = "0.12.1" dependencies = [ "assert_matches", "async-trait", @@ -3647,6 +4051,8 @@ dependencies = [ "clru", "data-encoding", "derivative", + "eyre", + "ferveo-common", "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", "ibc-proto 0.17.1 
(git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", @@ -3657,6 +4063,7 @@ dependencies = [ "masp_primitives", "masp_proofs", "namada_core", + "namada_ethereum_bridge", "namada_proof_of_stake", "parity-wasm", "paste", @@ -3664,8 +4071,11 @@ dependencies = [ "proptest", "prost", "pwasm-utils", + "rand 0.8.5", + "rand_core 0.6.4", "rayon", "rust_decimal", + "serde 1.0.147", "serde_json", "sha2 0.9.9", "tempfile", @@ -3692,10 +4102,11 @@ dependencies = [ [[package]] name = "namada_apps" -version = "0.12.0" +version = "0.12.1" dependencies = [ "ark-serialize", "ark-std", + "assert_matches", "async-std", "async-trait", "base64 0.13.1", @@ -3706,12 +4117,15 @@ dependencies = [ "borsh", "byte-unit", "byteorder", + "bytes 1.2.1", "clap", + "clarity", "color-eyre", "config", "data-encoding", "derivative", "ed25519-consensus", + "ethabi", "eyre", "ferveo", "ferveo-common", @@ -3719,14 +4133,18 @@ dependencies = [ "flate2", "futures 0.3.25", "git2", + "index-set", "itertools", "libc", "libloading", "masp_primitives", "masp_proofs", + "message-io", "namada", "num-derive", + "num-rational 0.4.1", "num-traits 0.2.15", + "num256", "num_cpus", "once_cell", "orion", @@ -3743,9 +4161,11 @@ dependencies = [ "rpassword", "rust_decimal", "rust_decimal_macros", + "semver 1.0.14", "serde 1.0.147", "serde_bytes", "serde_json", + "serde_regex", "sha2 0.9.9", "signal-hook", "sparse-merkle-tree", @@ -3772,13 +4192,15 @@ dependencies = [ "tracing 0.1.37", "tracing-log", "tracing-subscriber 0.3.16", + "warp", + "web30", "websocket", "winapi 0.3.9", ] [[package]] name = "namada_core" -version = "0.12.0" +version = "0.12.1" dependencies = [ "ark-bls12-381", "ark-ec", @@ -3791,6 +4213,8 @@ dependencies = [ "data-encoding", "derivative", "ed25519-consensus", + "ethabi", + "eyre", "ferveo", "ferveo-common", "group-threshold-cryptography", @@ -3803,6 +4227,7 @@ dependencies = [ "itertools", "libsecp256k1", "masp_primitives", + "num-rational 0.4.1", "pretty_assertions", "proptest", "prost", @@ -3822,6 +4247,7 @@ dependencies = [ "tendermint-proto 0.23.6", "test-log", "thiserror", + "tiny-keccak", "tonic-build", "tracing 0.1.37", "tracing-subscriber 0.3.16", @@ -3830,7 +4256,7 @@ dependencies = [ [[package]] name = "namada_encoding_spec" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "itertools", @@ -3839,9 +4265,31 @@ dependencies = [ "namada", ] +[[package]] +name = "namada_ethereum_bridge" +version = "0.11.0" +dependencies = [ + "assert_matches", + "borsh", + "eyre", + "itertools", + "namada_core", + "namada_proof_of_stake", + "serde 1.0.147", + "serde_json", + "tendermint 0.23.5", + "tendermint 0.23.6", + "tendermint-proto 0.23.5", + "tendermint-proto 0.23.6", + "tendermint-rpc 0.23.5", + "tendermint-rpc 0.23.6", + "toml", + "tracing 0.1.37", +] + [[package]] name = "namada_macros" -version = "0.12.0" +version = "0.12.1" dependencies = [ "quote", "syn", @@ -3849,21 +4297,24 @@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "derivative", + "ferveo-common", "namada_core", "proptest", "rust_decimal", "rust_decimal_macros", + "tendermint-proto 0.23.5", + "tendermint-proto 0.23.6", "thiserror", "tracing 0.1.37", ] [[package]] name = "namada_tests" -version = "0.12.0" +version = "0.12.1" dependencies = [ "assert_cmd", "borsh", @@ -3907,7 +4358,7 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "masp_primitives", @@ 
-3922,7 +4373,7 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "hex", @@ -3933,7 +4384,7 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "namada_core", @@ -4065,17 +4516,42 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "num" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8536030f9fea7127f841b45bb6243b27255787fb4eb83958aa1ef9d2fdc0c36" +dependencies = [ + "num-bigint 0.2.6", + "num-complex 0.2.4", + "num-integer", + "num-iter", + "num-rational 0.2.4", + "num-traits 0.2.15", +] + [[package]] name = "num" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" dependencies = [ - "num-bigint", - "num-complex", + "num-bigint 0.4.3", + "num-complex 0.4.2", "num-integer", "num-iter", - "num-rational", + "num-rational 0.4.1", + "num-traits 0.2.15", +] + +[[package]] +name = "num-bigint" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +dependencies = [ + "autocfg 1.1.0", + "num-integer", "num-traits 0.2.15", ] @@ -4091,6 +4567,16 @@ dependencies = [ "serde 1.0.147", ] +[[package]] +name = "num-complex" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" +dependencies = [ + "autocfg 1.1.0", + "num-traits 0.2.15", +] + [[package]] name = "num-complex" version = "0.4.2" @@ -4125,9 +4611,21 @@ dependencies = [ name = "num-iter" version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +dependencies = [ + "autocfg 1.1.0", + "num-integer", + "num-traits 0.2.15", +] + +[[package]] +name = "num-rational" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ "autocfg 1.1.0", + "num-bigint 0.2.6", "num-integer", "num-traits 0.2.15", ] @@ -4139,7 +4637,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" dependencies = [ "autocfg 1.1.0", - "num-bigint", + "num-bigint 0.4.3", "num-integer", "num-traits 0.2.15", "serde 1.0.147", @@ -4163,6 +4661,20 @@ dependencies = [ "autocfg 1.1.0", ] +[[package]] +name = "num256" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9b5179e82f0867b23e0b9b822493821f9345561f271364f409c8e4a058367d" +dependencies = [ + "lazy_static", + "num 0.4.0", + "num-derive", + "num-traits 0.2.15", + "serde 1.0.147", + "serde_derive", +] + [[package]] name = "num_cpus" version = "1.14.0" @@ -4266,7 +4778,7 @@ dependencies = [ "aes", "arrayvec 0.7.2", "bigint", - "bitvec", + "bitvec 0.22.3", "blake2b_simd 1.0.0", "ff", "fpe", @@ -4332,6 +4844,32 @@ dependencies = [ "group", ] +[[package]] +name = "parity-scale-codec" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "366e44391a8af4cfd6002ef6ba072bae071a96aafca98d7d448a34c5dca38b6a" +dependencies = [ + "arrayvec 
0.7.2", + "bitvec 1.0.1", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde 1.0.147", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" +dependencies = [ + "proc-macro-crate 1.2.1", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "parity-wasm" version = "0.45.0" @@ -4627,6 +5165,19 @@ dependencies = [ "output_vt100", ] +[[package]] +name = "primitive-types" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-rlp", + "impl-serde", + "uint", +] + [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -4636,6 +5187,17 @@ dependencies = [ "toml", ] +[[package]] +name = "proc-macro-crate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" +dependencies = [ + "once_cell", + "thiserror", + "toml", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -4705,7 +5267,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" dependencies = [ "bytes 1.2.1", - "heck", + "heck 0.3.3", "itertools", "lazy_static", "log 0.4.17", @@ -4834,6 +5396,12 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + [[package]] name = "rand" version = "0.6.5" @@ -5287,6 +5855,16 @@ dependencies = [ "libc", ] +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes 1.2.1", + "rustc-hex", +] + [[package]] name = "rocksdb" version = "0.19.0" @@ -5347,6 +5925,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + [[package]] name = "rustc_version" version = "0.2.3" @@ -5365,6 +5949,15 @@ dependencies = [ "semver 0.11.0", ] +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.14", +] + [[package]] name = "rustls" version = "0.19.1" @@ -5402,6 +5995,15 @@ dependencies = [ "security-framework", ] +[[package]] +name = "rustls-pemfile" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" +dependencies = [ + "base64 0.13.1", +] + [[package]] name = "rustversion" version = "1.0.9" @@ -5507,6 +6109,12 @@ dependencies = [ "parking_lot 0.12.1", ] +[[package]] +name = "scoped-tls" +version = 
"1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + [[package]] name = "scopeguard" version = "1.1.0" @@ -5577,6 +6185,15 @@ dependencies = [ "serde 1.0.147", ] +[[package]] +name = "secp256k1" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff55dc09d460954e9ef2fa8a7ced735a964be9981fd50e870b2b3b0705e14964" +dependencies = [ + "secp256k1-sys 0.6.1", +] + [[package]] name = "secp256k1-sys" version = "0.4.2" @@ -5595,6 +6212,15 @@ dependencies = [ "cc", ] +[[package]] +name = "secp256k1-sys" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b" +dependencies = [ + "cc", +] + [[package]] name = "security-framework" version = "2.3.1" @@ -5687,6 +6313,18 @@ dependencies = [ "serde 0.8.23", ] +[[package]] +name = "serde-rlp" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69472f967577700225f282233c0625f7b73c371c3953b72d6dcfb91bd0133ca9" +dependencies = [ + "byteorder", + "error", + "num 0.2.1", + "serde 1.0.147", +] + [[package]] name = "serde_bytes" version = "0.11.7" @@ -5728,6 +6366,16 @@ dependencies = [ "serde 1.0.147", ] +[[package]] +name = "serde_regex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8136f1a4ea815d7eac4101cfd0b16dc0cb5e1fe1b8609dfd728058656b7badf" +dependencies = [ + "regex", + "serde 1.0.147", +] + [[package]] name = "serde_repr" version = "0.1.9" @@ -5788,6 +6436,17 @@ dependencies = [ "opaque-debug 0.3.0", ] +[[package]] +name = "sha-1" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.5", +] + [[package]] name = "sha1" version = "0.10.5" @@ -5835,6 +6494,16 @@ dependencies = [ "opaque-debug 0.3.0", ] +[[package]] +name = "sha3" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" +dependencies = [ + "digest 0.10.5", + "keccak", +] + [[package]] name = "sharded-slab" version = "0.1.4" @@ -5986,10 +6655,32 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "strum" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" +dependencies = [ + "heck 0.4.0", + "proc-macro2", + "quote", + "rustversion", + "syn", +] + [[package]] name = "subproductdomain" version = "0.1.0" -source = "git+https://github.com/anoma/ferveo#1022ab2c7ccc689abcc05e5a08df6fb0c2a3fc65" +source = "git+https://github.com/anoma/ferveo?rev=9e5e91c954158e7cff45c483fd06cd649a81553f#9e5e91c954158e7cff45c483fd06cd649a81553f" dependencies = [ "anyhow", "ark-ec", @@ -6397,18 +7088,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.30" +version = "1.0.37" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.30" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" dependencies = [ "proc-macro2", "quote", @@ -6611,6 +7302,18 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-openssl" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08f9ffb7809f1b20c1b398d92acf4cc719874b3b2b2d9ea2f09b4a80350878a" +dependencies = [ + "futures-util", + "openssl", + "openssl-sys", + "tokio", +] + [[package]] name = "tokio-reactor" version = "0.1.12" @@ -6700,6 +7403,18 @@ dependencies = [ "tokio-io", ] +[[package]] +name = "tokio-tungstenite" +version = "0.17.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" +dependencies = [ + "futures-util", + "log 0.4.17", + "tokio", + "tungstenite 0.17.3", +] + [[package]] name = "tokio-util" version = "0.6.10" @@ -7043,6 +7758,53 @@ dependencies = [ "utf-8", ] +[[package]] +name = "tungstenite" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ad3713a14ae247f22a728a0456a545df14acf3867f905adff84be99e23b3ad1" +dependencies = [ + "base64 0.13.1", + "byteorder", + "bytes 1.2.1", + "http", + "httparse", + "log 0.4.17", + "rand 0.8.5", + "sha-1 0.9.8", + "thiserror", + "url 2.3.1", + "utf-8", +] + +[[package]] +name = "tungstenite" +version = "0.17.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" +dependencies = [ + "base64 0.13.1", + "byteorder", + "bytes 1.2.1", + "http", + "httparse", + "log 0.4.17", + "rand 0.8.5", + "sha-1 0.10.0", + "thiserror", + "url 2.3.1", + "utf-8", +] + +[[package]] +name = "twoway" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" +dependencies = [ + "memchr", +] + [[package]] name = "typeable" version = "0.1.2" @@ -7324,6 +8086,37 @@ dependencies = [ "try-lock", ] +[[package]] +name = "warp" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed7b8be92646fc3d18b06147664ebc5f48d222686cb11a8755e561a735aacc6d" +dependencies = [ + "bytes 1.2.1", + "futures-channel", + "futures-util", + "headers", + "http", + "hyper 0.14.23", + "log 0.4.17", + "mime 0.3.16", + "mime_guess", + "multipart", + "percent-encoding 2.2.0", + "pin-project", + "rustls-pemfile", + "scoped-tls", + "serde 1.0.147", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-stream", + "tokio-tungstenite", + "tokio-util 0.7.4", + "tower-service", + "tracing 0.1.37", +] + [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" @@ -7684,6 +8477,25 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web30" +version = "0.19.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "426f817a02df256fec6bff3ec5ef3859204658774af9cd5ef2525ca8d50f6f2c" +dependencies = [ + "awc", + "clarity", + "futures 0.3.25", + 
"lazy_static", + "log 0.4.17", + "num 0.4.0", + "num256", + "serde 1.0.147", + "serde_derive", + "serde_json", + "tokio", +] + [[package]] name = "webpki" version = "0.21.4" @@ -7997,6 +8809,15 @@ dependencies = [ "tap", ] +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + [[package]] name = "xattr" version = "0.2.3" @@ -8054,7 +8875,7 @@ source = "git+https://github.com/zcash/librustzcash/?rev=2425a08#2425a0869098e3b dependencies = [ "aes", "bip0039", - "bitvec", + "bitvec 0.22.3", "blake2b_simd 1.0.0", "blake2s_simd 1.0.0", "bls12_381", @@ -8118,6 +8939,25 @@ dependencies = [ "synstructure", ] +[[package]] +name = "zstd" +version = "0.11.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "5.0.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" +dependencies = [ + "libc", + "zstd-sys", +] + [[package]] name = "zstd-sys" version = "2.0.1+zstd.1.5.2" diff --git a/Cargo.toml b/Cargo.toml index 42a99343fc..e79c08c748 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ resolver = "2" members = [ "apps", "core", + "ethereum_bridge", "proof_of_stake", "shared", "tests", diff --git a/Makefile b/Makefile index 58329ea22b..b26660593d 100644 --- a/Makefile +++ b/Makefile @@ -25,6 +25,9 @@ build-test: build-release: NAMADA_DEV=false $(cargo) build --release --package namada_apps --manifest-path Cargo.toml +build-debug: + ANOMA_DEV=false $(cargo) build --package namada_apps --manifest-path Cargo.toml + install-release: NAMADA_DEV=false $(cargo) install --path ./apps --locked @@ -65,17 +68,14 @@ clippy-abcipp: $(cargo) +$(nightly) clippy --all-targets \ --manifest-path ./proof_of_stake/Cargo.toml \ --features "testing" && \ + $(cargo) +$(nightly) clippy --all-targets \ + --manifest-path ./core/Cargo.toml \ + --no-default-features \ + --features "testing wasm-runtime abcipp ibc-mocks-abcipp ferveo-tpke" $(cargo) +$(nightly) clippy --all-targets \ --manifest-path ./shared/Cargo.toml \ --no-default-features \ - --features "testing wasm-runtime abcipp ibc-mocks-abcipp ferveo-tpke" && \ - $(cargo) +$(nightly) clippy \ - --all-targets \ - --manifest-path ./vm_env/Cargo.toml \ - --no-default-features && \ - make -C $(wasms) clippy && \ - $(foreach wasm,$(wasm_templates),$(clippy-wasm) && ) true - + --features "testing wasm-runtime abcipp ibc-mocks-abcipp ferveo-tpke" clippy-fix: $(cargo) +$(nightly) clippy --fix -Z unstable-options --all-targets --allow-dirty --allow-staged @@ -124,16 +124,22 @@ test-unit-abcipp: --features "testing" \ $(TEST_FILTER) -- \ -Z unstable-options --report-time && \ + $(cargo) test \ + --manifest-path ./core/Cargo.toml \ + --no-default-features \ + --features "testing wasm-runtime abcipp ibc-mocks-abcipp ferveo-tpke" \ + $(TEST_FILTER) -- \ + -Z unstable-options --report-time && \ $(cargo) test \ --manifest-path ./shared/Cargo.toml \ --no-default-features \ - --features "testing wasm-runtime abcipp ibc-mocks-abcipp" \ + --features "testing wasm-runtime abcipp ibc-mocks-abcipp ferveo-tpke" \ $(TEST_FILTER) -- \ -Z unstable-options --report-time && \ $(cargo) test \ --manifest-path ./vm_env/Cargo.toml \ 
--no-default-features \ - --features "abcipp" \ + --features "namada_core/abcipp" \ $(TEST_FILTER) -- \ -Z unstable-options --report-time diff --git a/apps/Cargo.toml b/apps/Cargo.toml index bba5ca030e..a97ff7793b 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -6,7 +6,7 @@ license = "GPL-3.0" name = "namada_apps" readme = "../README.md" resolver = "2" -version = "0.12.0" +version = "0.12.1" default-run = "namada" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -39,13 +39,18 @@ doc = false name = "namadaw" path = "src/bin/namada-wallet/main.rs" +# Namada relayer +[[bin]] +doc = false +name = "namadar" +path = "src/bin/namada-relayer/main.rs" + [features] default = ["std", "abciplus"] dev = ["namada/dev"] std = ["ed25519-consensus/std", "rand/std", "rand_core/std"] # for integration tests and test utilies testing = ["dev"] - abcipp = [ "namada/abcipp", "namada/tendermint-rpc-abcipp", @@ -54,6 +59,7 @@ abcipp = [ "tendermint-proto-abcipp", "tendermint-rpc-abcipp", "tower-abci-abcipp", + "namada/tendermint-abcipp" ] abciplus = [ @@ -82,21 +88,27 @@ byte-unit = "4.0.13" byteorder = "1.4.2" # https://github.com/clap-rs/clap/issues/1037 clap = {git = "https://github.com/clap-rs/clap/", tag = "v3.0.0-beta.2", default-features = false, features = ["std", "suggestions", "color", "cargo"]} +clarity = "0.5.1" color-eyre = "0.5.10" config = "0.11.0" data-encoding = "2.3.2" derivative = "2.2.0" ed25519-consensus = "1.2.0" -ferveo = {git = "https://github.com/anoma/ferveo"} -ferveo-common = {git = "https://github.com/anoma/ferveo"} +ethabi = "17.0.0" +ferveo = {git = "https://github.com/anoma/ferveo", rev = "9e5e91c954158e7cff45c483fd06cd649a81553f"} +ferveo-common = {git = "https://github.com/anoma/ferveo", rev = "9e5e91c954158e7cff45c483fd06cd649a81553f"} eyre = "0.6.5" flate2 = "1.0.22" file-lock = "2.0.2" futures = "0.3" +index-set = {git = "https://github.com/heliaxdev/index-set", tag = "v0.7.1"} itertools = "0.10.1" libc = "0.2.97" libloading = "0.7.2" +message-io = {version = "0.14.3", default-features = false, features = ["websocket"]} +num256 = "0.3.5" num-derive = "0.3.3" +num-rational = "0.4.1" num-traits = "0.2.14" num_cpus = "1.13.0" once_cell = "1.8.0" @@ -114,6 +126,8 @@ rpassword = "5.0.1" serde = {version = "1.0.125", features = ["derive"]} serde_bytes = "0.11.5" serde_json = {version = "1.0.62", features = ["raw_value"]} +serde_regex = "1.1.0" +semver = "1.0.14" sha2 = "0.9.3" signal-hook = "0.3.9" # sysinfo with disabled multithread feature @@ -140,6 +154,7 @@ tower-abci = {version = "0.1.0", optional = true} tracing = "0.1.30" tracing-log = "0.1.2" tracing-subscriber = {version = "0.3.7", features = ["env-filter"]} +web30 = "0.19.1" websocket = "0.26.2" winapi = "0.3.9" #libmasp = { git = "https://github.com/anoma/masp", branch = "murisi/masp-incentive" } @@ -149,7 +164,11 @@ bimap = {version = "0.6.2", features = ["serde"]} rust_decimal = "1.26.1" rust_decimal_macros = "1.26.1" +warp = "0.3.2" +bytes = "1.1.0" + [dev-dependencies] +assert_matches = "1.5.0" namada = {path = "../shared", default-features = false, features = ["testing", "wasm-runtime"]} bit-set = "0.5.2" # A fork with state machime testing diff --git a/apps/src/bin/namada-client/cli.rs b/apps/src/bin/namada-client/cli.rs index 770928c7c0..a5cc70451b 100644 --- a/apps/src/bin/namada-client/cli.rs +++ b/apps/src/bin/namada-client/cli.rs @@ -3,7 +3,7 @@ use color_eyre::eyre::Result; use namada_apps::cli; use namada_apps::cli::cmds::*; -use 
namada_apps::client::{rpc, tx, utils}; +use namada_apps::client::{eth_bridge_pool, rpc, tx, utils}; pub async fn main() -> Result<()> { match cli::namada_client_cli()? { @@ -48,6 +48,10 @@ pub async fn main() -> Result<()> { Sub::Withdraw(Withdraw(args)) => { tx::submit_withdraw(ctx, args).await; } + // Eth bridge pool + Sub::AddToEthBridgePool(args) => { + eth_bridge_pool::add_to_eth_bridge_pool(ctx, args.0).await; + } // Ledger queries Sub::QueryEpoch(QueryEpoch(args)) => { rpc::query_epoch(args).await; diff --git a/apps/src/bin/namada-relayer/cli.rs b/apps/src/bin/namada-relayer/cli.rs new file mode 100644 index 0000000000..16576a2a82 --- /dev/null +++ b/apps/src/bin/namada-relayer/cli.rs @@ -0,0 +1,20 @@ +//! Namada client CLI. + +use color_eyre::eyre::Result; +use namada_apps::cli; +use namada_apps::cli::cmds; +use namada_apps::client::eth_bridge_pool; + +pub async fn main() -> Result<()> { + let (cmd, _) = cli::namada_relayer_cli()?; + use cmds::EthBridgePool as Sub; + match cmd { + Sub::ConstructProof(args) => { + eth_bridge_pool::construct_bridge_pool_proof(args).await; + } + Sub::QueryPool(query) => { + eth_bridge_pool::query_bridge_pool(query).await; + } + } + Ok(()) +} diff --git a/apps/src/bin/namada-relayer/main.rs b/apps/src/bin/namada-relayer/main.rs new file mode 100644 index 0000000000..73876fe7d2 --- /dev/null +++ b/apps/src/bin/namada-relayer/main.rs @@ -0,0 +1,17 @@ +mod cli; + +use color_eyre::eyre::Result; +use namada_apps::logging; +use tracing_subscriber::filter::LevelFilter; + +#[tokio::main] +async fn main() -> Result<()> { + // init error reporting + color_eyre::install()?; + + // init logging + logging::init_from_env_or(LevelFilter::INFO)?; + + // run the CLI + cli::main().await +} diff --git a/apps/src/bin/namada/cli.rs b/apps/src/bin/namada/cli.rs index 33ac5653f1..88d09da0cf 100644 --- a/apps/src/bin/namada/cli.rs +++ b/apps/src/bin/namada/cli.rs @@ -53,6 +53,9 @@ fn handle_command(cmd: cli::cmds::Namada, raw_sub_cmd: String) -> Result<()> { handle_subcommand("namadac", sub_args) } cli::cmds::Namada::Wallet(_) => handle_subcommand("namadaw", sub_args), + cli::cmds::Namada::EthBridgePool(_) => { + handle_subcommand("namadar", sub_args) + } } } diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index aa240b704b..9607d3e276 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -24,12 +24,14 @@ const APP_NAME: &str = "Namada"; const NODE_CMD: &str = "node"; const CLIENT_CMD: &str = "client"; const WALLET_CMD: &str = "wallet"; +const BRIDGE_POOL_CMD: &str = "ethereum-bridge-pool"; pub mod cmds { use clap::AppSettings; use super::utils::*; use super::{args, ArgMatches, CLIENT_CMD, NODE_CMD, WALLET_CMD}; + use crate::cli::BRIDGE_POOL_CMD; /// Commands for `namada` binary. #[allow(clippy::large_enum_variant)] @@ -41,6 +43,7 @@ pub mod cmds { Wallet(NamadaWallet), // Inlined commands from the node. + EthBridgePool(EthBridgePool), Ledger(Ledger), // Inlined commands from the client. 
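As the dispatch in `apps/src/bin/namada/cli.rs` above shows, the top-level `namada` binary forwards the new `ethereum-bridge-pool` command to the dedicated `namadar` relayer binary, mirroring how the node, client, and wallet commands are delegated to `namadan`, `namadac`, and `namadaw`. A simplified, self-contained sketch of that multiplexing (binary lookup and error reporting are omitted, and `forward_to` is a hypothetical helper, not the real `handle_subcommand`):

```rust
use std::process::{Command, ExitStatus};

// Spawn the sub-binary with the remaining CLI arguments and wait for it.
fn forward_to(bin: &str, args: &[String]) -> std::io::Result<ExitStatus> {
    Command::new(bin).args(args).status()
}

fn main() -> std::io::Result<()> {
    let args: Vec<String> = std::env::args().skip(1).collect();
    let status = match args.first().map(String::as_str) {
        // The new relayer commands are handled by `namadar`.
        Some("ethereum-bridge-pool") => forward_to("namadar", &args)?,
        Some("ledger") => forward_to("namadan", &args)?,
        Some("wallet") => forward_to("namadaw", &args)?,
        // Everything else is treated as a client command.
        _ => forward_to("namadac", &args)?,
    };
    std::process::exit(status.code().unwrap_or(1));
}
```

One plausible motivation for a separate relayer binary is that operators who never relay bridge transfers need not ship it, while `namada` still presents a single entry point.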
@@ -58,6 +61,7 @@ pub mod cmds { app.subcommand(NamadaNode::def()) .subcommand(NamadaClient::def()) .subcommand(NamadaWallet::def()) + .subcommand(EthBridgePool::def()) .subcommand(Ledger::def()) .subcommand(TxCustom::def()) .subcommand(TxTransfer::def()) @@ -165,6 +169,8 @@ pub mod cmds { .subcommand(Bond::def().display_order(2)) .subcommand(Unbond::def().display_order(2)) .subcommand(Withdraw::def().display_order(2)) + // Ethereum bridge pool + .subcommand(AddToEthBridgePool::def().display_order(3)) // Queries .subcommand(QueryEpoch::def().display_order(3)) .subcommand(QueryTransfers::def().display_order(3)) @@ -217,6 +223,8 @@ pub mod cmds { Self::parse_with_ctx(matches, QueryProposalResult); let query_protocol_parameters = Self::parse_with_ctx(matches, QueryProtocolParameters); + let add_to_eth_bridge_pool = + Self::parse_with_ctx(matches, AddToEthBridgePool); let utils = SubCmd::parse(matches).map(Self::WithoutContext); tx_custom .or(tx_transfer) @@ -230,6 +238,7 @@ pub mod cmds { .or(bond) .or(unbond) .or(withdraw) + .or(add_to_eth_bridge_pool) .or(query_epoch) .or(query_transfers) .or(query_conversions) @@ -292,6 +301,7 @@ pub mod cmds { Bond(Bond), Unbond(Unbond), Withdraw(Withdraw), + AddToEthBridgePool(AddToEthBridgePool), QueryEpoch(QueryEpoch), QueryTransfers(QueryTransfers), QueryConversions(QueryConversions), @@ -1503,10 +1513,116 @@ pub mod cmds { .add_args::() } } + + /// Used as sub-commands (`SubCmd` instance) in `namada` binary. + #[derive(Clone, Debug)] + pub enum EthBridgePool { + /// Construct a proof that a set of transfers is in the pool. + /// This can be used to relay transfers across the + /// bridge to Ethereum. + ConstructProof(args::BridgePoolProof), + /// Query the contents of the pool. + QueryPool(args::Query), + } + + impl Cmd for EthBridgePool { + fn add_sub(app: App) -> App { + app.subcommand(ConstructProof::def().display_order(1)) + .subcommand(QueryEthBridgePool::def().display_order(1)) + } + + fn parse(matches: &ArgMatches) -> Option { + let construct_proof = ConstructProof::parse(matches) + .map(|proof| Self::ConstructProof(proof.0)); + let query_pool = QueryEthBridgePool::parse(matches) + .map(|q| Self::QueryPool(q.0)); + construct_proof.or(query_pool) + } + } + + impl SubCmd for EthBridgePool { + const CMD: &'static str = BRIDGE_POOL_CMD; + + fn parse(matches: &ArgMatches) -> Option { + Cmd::parse(matches) + } + + fn def() -> App { + App::new(Self::CMD) + .about( + "Functionality for interacting with the Ethereum bridge \ + pool. 
This pool holds transfers waiting to be relayed to \ + Ethereum.", + ) + .subcommand(ConstructProof::def().display_order(1)) + .subcommand(QueryEthBridgePool::def().display_order(1)) + } + } + + #[derive(Clone, Debug)] + pub struct AddToEthBridgePool(pub args::EthereumBridgePool); + + impl SubCmd for AddToEthBridgePool { + const CMD: &'static str = "add-erc20-transfer"; + + fn parse(matches: &ArgMatches) -> Option { + matches + .subcommand_matches(Self::CMD) + .map(|matches| Self(args::EthereumBridgePool::parse(matches))) + } + + fn def() -> App { + App::new(Self::CMD) + .about("Add a new transfer to the Ethereum bridge pool.") + .add_args::() + } + } + + #[derive(Clone, Debug)] + pub struct ConstructProof(pub args::BridgePoolProof); + + impl SubCmd for ConstructProof { + const CMD: &'static str = "construct-proof"; + + fn parse(matches: &ArgMatches) -> Option { + matches + .subcommand_matches(Self::CMD) + .map(|matches| Self(args::BridgePoolProof::parse(matches))) + } + + fn def() -> App { + App::new(Self::CMD) + .about( + "Construct a merkle proof that the given transfer is in \ + the pool.", + ) + .add_args::() + } + } + + #[derive(Clone, Debug)] + pub struct QueryEthBridgePool(args::Query); + + impl SubCmd for QueryEthBridgePool { + const CMD: &'static str = "query"; + + fn parse(matches: &ArgMatches) -> Option { + matches + .subcommand_matches(Self::CMD) + .map(|matches| Self(args::Query::parse(matches))) + } + + fn def() -> App { + App::new(Self::CMD) + .about("Get the contents of the Ethereum bridge pool.") + .add_args::() + } + } } pub mod args { + use std::convert::TryFrom; use std::env; use std::net::SocketAddr; use std::path::PathBuf; @@ -1515,11 +1631,14 @@ pub mod args { use namada::ibc::core::ics24_host::identifier::{ChannelId, PortId}; use namada::types::address::Address; use namada::types::chain::{ChainId, ChainIdPrefix}; + use namada::types::ethereum_events::EthAddress; use namada::types::governance::ProposalVote; + use namada::types::keccak::KeccakHash; use namada::types::key::*; use namada::types::masp::MaspValue; use namada::types::storage::{self, Epoch}; use namada::types::token; + use namada::types::token::Amount; use namada::types::transaction::GasLimit; use rust_decimal::Decimal; @@ -1562,10 +1681,15 @@ pub mod args { const DATA_PATH: Arg = arg("data-path"); const DECRYPT: ArgFlag = flag("decrypt"); const DONT_ARCHIVE: ArgFlag = flag("dont-archive"); + const DONT_PREFETCH_WASM: ArgFlag = flag("dont-prefetch-wasm"); const DRY_RUN_TX: ArgFlag = flag("dry-run"); const EPOCH: ArgOpt = arg_opt("epoch"); + const ERC20: Arg = arg("erc20"); + const ETH_ADDRESS: Arg = arg("ethereum-address"); + const FEE_AMOUNT: ArgDefault = + arg_default("fee-amount", DefaultFn(|| token::Amount::from(0))); + const FEE_PAYER: Arg = arg("fee-payer"); const FORCE: ArgFlag = flag("force"); - const DONT_PREFETCH_WASM: ArgFlag = flag("dont-prefetch-wasm"); const GAS_AMOUNT: ArgDefault = arg_default("gas-amount", DefaultFn(|| token::Amount::from(0))); const GAS_LIMIT: ArgDefault = @@ -1574,6 +1698,7 @@ pub mod args { arg_default_from_ctx("gas-token", DefaultFn(|| "NAM".into())); const GENESIS_PATH: Arg = arg("genesis-path"); const GENESIS_VALIDATOR: ArgOpt = arg("genesis-validator").opt(); + const HASH_LIST: Arg = arg("hash-list"); const LEDGER_ADDRESS_ABOUT: &str = "Address of a ledger node as \"{scheme}://{host}:{port}\". 
If the \ scheme is not supplied, it is assumed to be TCP."; @@ -1632,6 +1757,9 @@ pub mod args { arg_opt("account-key"); const VALIDATOR_CONSENSUS_KEY: ArgOpt = arg_opt("consensus-key"); + const VALIDATOR_ETH_COLD_KEY: ArgOpt = + arg_opt("eth-cold-key"); + const VALIDATOR_ETH_HOT_KEY: ArgOpt = arg_opt("eth-hot-key"); const VALIDATOR_CODE_PATH: ArgOpt = arg_opt("validator-code-path"); const VALUE: ArgOpt = arg_opt("value"); const VIEWING_KEY: Arg = arg("key"); @@ -1711,6 +1839,112 @@ pub mod args { } } + /// A transfer to be added to the Ethereum bridge pool. + #[derive(Clone, Debug)] + pub struct EthereumBridgePool { + /// The args for building a tx to the bridge pool + pub tx: Tx, + /// The type of token + pub asset: EthAddress, + /// The recipient address + pub recipient: EthAddress, + /// The sender of the transfer + pub sender: WalletAddress, + /// The amount to be transferred + pub amount: Amount, + /// The amount of fees (in NAM) + pub gas_amount: Amount, + /// The account of fee payer. + pub gas_payer: WalletAddress, + } + + impl Args for EthereumBridgePool { + fn parse(matches: &ArgMatches) -> Self { + let tx = Tx::parse(matches); + let asset = ERC20.parse(matches); + let recipient = ETH_ADDRESS.parse(matches); + let sender = ADDRESS.parse(matches); + let amount = AMOUNT.parse(matches); + let gas_amount = FEE_AMOUNT.parse(matches); + let gas_payer = FEE_PAYER.parse(matches); + Self { + tx, + asset, + recipient, + sender, + amount, + gas_amount, + gas_payer, + } + } + + fn def(app: App) -> App { + app.add_args::() + .arg( + ERC20 + .def() + .about("The Ethereum address of the ERC20 token."), + ) + .arg( + ETH_ADDRESS + .def() + .about("The Ethereum address receiving the tokens."), + ) + .arg( + ADDRESS + .def() + .about("The Namada address sending the tokens."), + ) + .arg(AMOUNT.def().about( + "The amount of tokens being sent across the bridge.", + )) + .arg(FEE_AMOUNT.def().about( + "The amount of NAM you wish to pay to have this transfer \ + relayed to Ethereum.", + )) + .arg( + FEE_PAYER.def().about( + "The Namada address of the account paying the fee.", + ), + ) + } + } + + #[derive(Debug, Clone)] + pub struct BridgePoolProof { + /// The query parameters. 
+ pub query: Query, + pub transfers: Vec, + } + + impl Args for BridgePoolProof { + fn parse(matches: &ArgMatches) -> Self { + let query = Query::parse(matches); + let hashes = HASH_LIST.parse(matches); + Self { + query, + transfers: hashes + .split(' ') + .map(|hash| { + KeccakHash::try_from(hash).unwrap_or_else(|_| { + tracing::info!( + "Could not parse '{}' as a Keccak hash.", + hash + ); + safe_exit(1) + }) + }) + .collect(), + } + } + + fn def(app: App) -> App { + app.add_args::().arg(HASH_LIST.def().about( + "List of Keccak hashes of transfers in the bridge pool.", + )) + } + } + /// Custom transaction arguments #[derive(Clone, Debug)] pub struct TxCustom { @@ -1941,6 +2175,8 @@ pub mod args { pub scheme: SchemeType, pub account_key: Option, pub consensus_key: Option, + pub eth_cold_key: Option, + pub eth_hot_key: Option, pub protocol_key: Option, pub commission_rate: Decimal, pub max_commission_rate_change: Decimal, @@ -1955,6 +2191,8 @@ pub mod args { let scheme = SCHEME.parse(matches); let account_key = VALIDATOR_ACCOUNT_KEY.parse(matches); let consensus_key = VALIDATOR_CONSENSUS_KEY.parse(matches); + let eth_cold_key = VALIDATOR_ETH_COLD_KEY.parse(matches); + let eth_hot_key = VALIDATOR_ETH_HOT_KEY.parse(matches); let protocol_key = PROTOCOL_KEY.parse(matches); let commission_rate = COMMISSION_RATE.parse(matches); let max_commission_rate_change = @@ -1967,6 +2205,8 @@ pub mod args { scheme, account_key, consensus_key, + eth_cold_key, + eth_hot_key, protocol_key, commission_rate, max_commission_rate_change, @@ -1990,7 +2230,18 @@ pub mod args { )) .arg(VALIDATOR_CONSENSUS_KEY.def().about( "A consensus key for the validator account. A new one \ - will be generated if none given.", + will be generated if none given. Note that this must be \ + ed25519.", + )) + .arg(VALIDATOR_ETH_COLD_KEY.def().about( + "An Eth cold key for the validator account. A new one \ + will be generated if none given. Note that this must be \ + secp256k1.", + )) + .arg(VALIDATOR_ETH_HOT_KEY.def().about( + "An Eth hot key for the validator account. A new one will \ + be generated if none given. Note that this must be \ + secp256k1.", )) .arg(PROTOCOL_KEY.def().about( "A public key for signing protocol transactions. A new \ @@ -2720,7 +2971,7 @@ pub mod args { /// save it in the wallet. 
pub initialized_account_alias: Option<String>,
    /// The amount being paid to include the transaction
-    pub fee_amount: token::Amount,
+    pub fee_amount: Amount,
    /// The token in which the fee is being paid
    pub fee_token: WalletAddress,
    /// The max amount of gas used to process tx
@@ -2777,7 +3028,7 @@ pub mod args {
            .arg(GAS_AMOUNT.def().about(
                "The amount being paid for the inclusion of this transaction",
            ))
-            .arg(GAS_TOKEN.def().about("The token for paying the gas"))
+            .arg(GAS_TOKEN.def().about("The token for paying the fee"))
            .arg(
                GAS_LIMIT.def().about(
                    "The maximum amount of gas needed to run transaction",
@@ -3454,6 +3705,11 @@ pub fn namada_wallet_cli() -> Result<(cmds::NamadaWallet, Context)> {
    cmds::NamadaWallet::parse_or_print_help(app)
}

+pub fn namada_relayer_cli() -> Result<(cmds::EthBridgePool, Context)> {
+    let app = namada_relayer_app();
+    cmds::EthBridgePool::parse_or_print_help(app)
+}
+
fn namada_app() -> App {
    let app = App::new(APP_NAME)
        .version(namada_version())
@@ -3485,3 +3741,11 @@ fn namada_wallet_app() -> App {
        .setting(AppSettings::SubcommandRequiredElseHelp);
    cmds::NamadaWallet::add_sub(args::Global::def(app))
}
+
+fn namada_relayer_app() -> App {
+    let app = App::new(APP_NAME)
+        .version(namada_version())
+        .about("Namada Ethereum bridge pool command line interface.")
+        .setting(AppSettings::SubcommandRequiredElseHelp);
+    cmds::EthBridgePool::add_sub(args::Global::def(app))
+}
diff --git a/apps/src/lib/client/eth_bridge_pool.rs b/apps/src/lib/client/eth_bridge_pool.rs
new file mode 100644
index 0000000000..a33c03bac4
--- /dev/null
+++ b/apps/src/lib/client/eth_bridge_pool.rs
@@ -0,0 +1,98 @@
+use std::collections::HashMap;
+
+use borsh::BorshSerialize;
+use namada::ledger::queries::RPC;
+use namada::proto::Tx;
+use namada::types::eth_abi::Encode;
+use namada::types::eth_bridge_pool::{
+    GasFee, PendingTransfer, TransferToEthereum,
+};
+use serde::{Deserialize, Serialize};
+
+use super::signing::TxSigningKey;
+use super::tx::process_tx;
+use crate::cli::{args, Context};
+use crate::facade::tendermint_rpc::HttpClient;
+
+const ADD_TRANSFER_WASM: &str = "tx_bridge_pool.wasm";
+
+/// Craft a transaction that adds a transfer to the Ethereum bridge pool.
+pub async fn add_to_eth_bridge_pool(
+    ctx: Context,
+    args: args::EthereumBridgePool,
+) {
+    let args::EthereumBridgePool {
+        ref tx,
+        asset,
+        recipient,
+        ref sender,
+        amount,
+        gas_amount,
+        ref gas_payer,
+    } = args;
+    let tx_code = ctx.read_wasm(ADD_TRANSFER_WASM);
+    let transfer = PendingTransfer {
+        transfer: TransferToEthereum {
+            asset,
+            recipient,
+            sender: ctx.get(sender),
+            amount,
+            // TODO: Add real nonce
+            nonce: Default::default(),
+        },
+        gas_fee: GasFee {
+            amount: gas_amount,
+            payer: ctx.get(gas_payer),
+        },
+    };
+    let data = transfer.try_to_vec().unwrap();
+    let transfer_tx = Tx::new(tx_code, Some(data));
+    // this should not initialize any new addresses, so we ignore the result.
+    process_tx(ctx, tx, transfer_tx, TxSigningKey::None).await;
+}
+
+/// Construct a proof that a set of transfers is in the bridge pool.
+pub async fn construct_bridge_pool_proof(args: args::BridgePoolProof) {
+    let client = HttpClient::new(args.query.ledger_address).unwrap();
+    let data = args.transfers.try_to_vec().unwrap();
+    let response = RPC
+        .shell()
+        .generate_bridge_pool_proof(&client, Some(data), None, false)
+        .await
+        .unwrap();
+
+    println!(
+        "Ethereum ABI-encoded proof:\n {:#?}",
+        response.data.into_inner()
+    );
+}
+
+/// A JSON-serializable representation of the Ethereum
+/// bridge pool.
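+/// The map is keyed by the Keccak hash of each pending transfer, so the
+/// serialized payload has the shape (illustrative):
+/// `{"bridge_pool_contents": {"<keccak hash>": <pending transfer>}}`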
+#[derive(Serialize, Deserialize)] +struct BridgePoolResponse { + bridge_pool_contents: HashMap, +} + +/// Query the contents of the Ethereum bridge pool. +/// Prints out a json payload. +pub async fn query_bridge_pool(args: args::Query) { + let client = HttpClient::new(args.ledger_address).unwrap(); + let response: Vec = RPC + .shell() + .read_ethereum_bridge_pool(&client) + .await + .unwrap(); + let pool_contents: HashMap = response + .into_iter() + .map(|transfer| (transfer.keccak256().to_string(), transfer)) + .collect(); + if pool_contents.is_empty() { + println!("Bridge pool is empty."); + return; + } + let contents = BridgePoolResponse { + bridge_pool_contents: pool_contents, + }; + println!("{}", serde_json::to_string_pretty(&contents).unwrap()); +} diff --git a/apps/src/lib/client/mod.rs b/apps/src/lib/client/mod.rs index 486eb3c26d..834addb08b 100644 --- a/apps/src/lib/client/mod.rs +++ b/apps/src/lib/client/mod.rs @@ -1,3 +1,4 @@ +pub mod eth_bridge_pool; pub mod rpc; pub mod signing; pub mod tendermint_rpc_types; diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index c0cc9d37db..2d1205f231 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -44,12 +44,13 @@ use namada::types::address::{masp, masp_tx_key, Address}; use namada::types::governance::{ OfflineProposal, OfflineVote, Proposal, ProposalVote, }; -use namada::types::key::*; +use namada::types::key::{self, *}; use namada::types::masp::{PaymentAddress, TransferTarget}; use namada::types::storage::{ - BlockHeight, Epoch, Key, KeySeg, TxIndex, RESERVED_ADDRESS_PREFIX, + self, BlockHeight, Epoch, Key, KeySeg, TxIndex, RESERVED_ADDRESS_PREFIX, }; use namada::types::time::DateTimeUtc; +use namada::types::token; use namada::types::token::{ Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, TX_KEY_PREFIX, }; @@ -57,7 +58,6 @@ use namada::types::transaction::governance::{ InitProposalData, VoteProposalData, }; use namada::types::transaction::{pos, InitAccount, InitValidator, UpdateVp}; -use namada::types::{storage, token}; use namada::{ledger, vm}; use rand_core::{CryptoRng, OsRng, RngCore}; use rust_decimal::Decimal; @@ -202,6 +202,8 @@ pub async fn submit_init_validator( scheme, account_key, consensus_key, + eth_cold_key, + eth_hot_key, protocol_key, commission_rate, max_commission_rate_change, @@ -217,6 +219,8 @@ pub async fn submit_init_validator( let validator_key_alias = format!("{}-key", alias); let consensus_key_alias = format!("{}-consensus-key", alias); + let eth_hot_key_alias = format!("{}-eth-hot-key", alias); + let eth_cold_key_alias = format!("{}-eth-cold-key", alias); let account_key = ctx.get_opt_cached(&account_key).unwrap_or_else(|| { println!("Generating validator account key..."); ctx.wallet @@ -250,14 +254,58 @@ pub async fn submit_init_validator( .1 }); + let eth_cold_key = ctx + .get_opt_cached(ð_cold_key) + .map(|key| match key { + common::SecretKey::Secp256k1(_) => key, + common::SecretKey::Ed25519(_) => { + eprintln!("Eth cold key can only be secp256k1"); + safe_exit(1) + } + }) + .unwrap_or_else(|| { + println!("Generating Eth cold key..."); + ctx.wallet + .gen_key( + // Note that ETH only allows secp256k1 + SchemeType::Secp256k1, + Some(eth_cold_key_alias.clone()), + unsafe_dont_encrypt, + ) + .1 + }); + + let eth_hot_key = ctx + .get_opt_cached(ð_hot_key) + .map(|key| match key { + common::SecretKey::Secp256k1(_) => key, + common::SecretKey::Ed25519(_) => { + eprintln!("Eth hot key can only be secp256k1"); + safe_exit(1) + } + }) + .unwrap_or_else(|| { + println!("Generating 
Eth hot key...");
+            ctx.wallet
+                .gen_key(
+                    // Note that ETH only allows secp256k1
+                    SchemeType::Secp256k1,
+                    Some(eth_hot_key_alias.clone()),
+                    unsafe_dont_encrypt,
+                )
+                .1
+        });
    let protocol_key = ctx.get_opt_cached(&protocol_key);
    if protocol_key.is_none() {
        println!("Generating protocol signing key...");
    }
+    let eth_hot_pk = eth_hot_key.ref_to();
    // Generate the validator keys
-    let validator_keys =
-        ctx.wallet.gen_validator_keys(protocol_key, scheme).unwrap();
+    let validator_keys = ctx
+        .wallet
+        .gen_validator_keys(Some(eth_hot_pk), protocol_key, scheme)
+        .unwrap();
    let protocol_key = validator_keys.get_protocol_keypair().ref_to();
    let dkg_key = validator_keys
        .dkg_keypair
@@ -307,6 +355,14 @@ pub async fn submit_init_validator(
    let data = InitValidator {
        account_key,
        consensus_key: consensus_key.ref_to(),
+        eth_cold_key: key::secp256k1::PublicKey::try_from_pk(
+            &eth_cold_key.ref_to(),
+        )
+        .unwrap(),
+        eth_hot_key: key::secp256k1::PublicKey::try_from_pk(
+            &eth_hot_key.ref_to(),
+        )
+        .unwrap(),
        protocol_key,
        dkg_key,
        commission_rate,
@@ -722,7 +778,7 @@ impl ShieldedContext {
    /// associated to notes, memos, and diversifiers. And the set of notes that
    /// we have spent are updated. The witness map is maintained to make it
    /// easier to construct note merkle paths in other code. See
-    /// https://zips.z.cash/protocol/protocol.pdf#scan
+    /// <https://zips.z.cash/protocol/protocol.pdf#scan>
    pub fn scan_tx(
        &mut self,
        height: BlockHeight,
@@ -2531,7 +2587,7 @@ pub async fn submit_validator_commission_change(
/// Submit transaction and wait for result. Returns a list of addresses
/// initialized in the transaction if any. In dry run, this is always empty.
-async fn process_tx(
+pub async fn process_tx(
    ctx: Context,
    args: &args::Tx,
    tx: Tx,
diff --git a/apps/src/lib/client/utils.rs b/apps/src/lib/client/utils.rs
index 21fed46c11..99e7d4b7d4 100644
--- a/apps/src/lib/client/utils.rs
+++ b/apps/src/lib/client/utils.rs
@@ -531,6 +531,36 @@ pub fn init_network(
        keypair.ref_to()
    });

+    let eth_hot_pk = try_parse_public_key(
+        format!("validator {name} eth hot key"),
+        &config.eth_hot_key,
+    )
+    .unwrap_or_else(|| {
+        let alias = format!("{}-eth-hot-key", name);
+        println!("Generating validator {} eth hot key...", name);
+        let (_alias, keypair) = wallet.gen_key(
+            SchemeType::Secp256k1,
+            Some(alias),
+            unsafe_dont_encrypt,
+        );
+        keypair.ref_to()
+    });
+
+    let eth_cold_pk = try_parse_public_key(
+        format!("validator {name} eth cold key"),
+        &config.eth_cold_key,
+    )
+    .unwrap_or_else(|| {
+        let alias = format!("{}-eth-cold-key", name);
+        println!("Generating validator {} eth cold key...", name);
+        let (_alias, keypair) = wallet.gen_key(
+            SchemeType::Secp256k1,
+            Some(alias),
+            unsafe_dont_encrypt,
+        );
+        keypair.ref_to()
+    });
+
    let dkg_pk = &config
        .dkg_public_key
        .as_ref()
@@ -549,6 +579,7 @@
    let validator_keys = wallet
        .gen_validator_keys(
+            Some(eth_hot_pk.clone()),
            Some(protocol_pk.clone()),
            SchemeType::Ed25519,
        )
@@ -563,6 +594,10 @@
        Some(genesis_config::HexString(consensus_pk.to_string()));
    config.account_public_key =
        Some(genesis_config::HexString(account_pk.to_string()));
+    config.eth_cold_key =
+        Some(genesis_config::HexString(eth_cold_pk.to_string()));
+    config.eth_hot_key =
+        Some(genesis_config::HexString(eth_hot_pk.to_string()));
    config.protocol_public_key =
        Some(genesis_config::HexString(protocol_pk.to_string()));

@@ -928,6 +963,12 @@ pub fn init_genesis_validator(
        consensus_public_key: Some(HexString(
            pre_genesis.consensus_key.ref_to().to_string(),
        )),
+        eth_cold_key: Some(HexString(
+            pre_genesis.eth_cold_key.ref_to().to_string(),
+        )),
+        eth_hot_key: Some(HexString(
+            pre_genesis.eth_hot_key.ref_to().to_string(),
+        )),
        account_public_key: Some(HexString(
            pre_genesis.account_key.ref_to().to_string(),
        )),
diff --git a/apps/src/lib/config/ethereum_bridge/ledger.rs b/apps/src/lib/config/ethereum_bridge/ledger.rs
new file mode 100644
index 0000000000..924546e0e2
--- /dev/null
+++ b/apps/src/lib/config/ethereum_bridge/ledger.rs
@@ -0,0 +1,46 @@
+//! Runtime configuration for a validator node.
+#[allow(unused_imports)]
+use namada::types::ethereum_events::EthereumEvent;
+use serde::{Deserialize, Serialize};
+
+/// Default [Ethereum JSON-RPC](https://ethereum.org/en/developers/docs/apis/json-rpc/) endpoint used by the oracle
+pub const DEFAULT_ORACLE_RPC_ENDPOINT: &str = "http://127.0.0.1:8545";
+
+/// The mode in which to run the Ethereum bridge.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum Mode {
+    /// Run `geth` in a subprocess, exposing an Ethereum
+    /// JSON-RPC endpoint at [`DEFAULT_ORACLE_RPC_ENDPOINT`]. By default, the
+    /// oracle is configured to listen for events from the Ethereum bridge
+    /// smart contracts using this endpoint.
+    Managed,
+    /// Do not run `geth`. The oracle will listen to the Ethereum JSON-RPC
+    /// endpoint as specified in the `oracle_rpc_endpoint` setting.
+    Remote,
+    /// Do not start a managed `geth` subprocess. Instead of the oracle
+    /// listening for events using an Ethereum JSON-RPC endpoint, an endpoint
+    /// will be exposed by the ledger itself for submission of Borsh-
+    /// serialized [`EthereumEvent`]s. Mostly useful for testing purposes.
+    EventsEndpoint,
+    /// Do not run any components of the Ethereum bridge.
+    Off,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct Config {
+    /// The mode in which to run the Ethereum node and oracle setup of this
+    /// validator.
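+    /// Defaults to [`Mode::Managed`], per the `Default` impl below.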
+ pub mode: Mode, + /// The Ethereum JSON-RPC endpoint that the Ethereum event oracle will use + /// to listen for events from the Ethereum bridge smart contracts + pub oracle_rpc_endpoint: String, +} + +impl Default for Config { + fn default() -> Self { + Self { + mode: Mode::Managed, + oracle_rpc_endpoint: DEFAULT_ORACLE_RPC_ENDPOINT.to_owned(), + } + } +} diff --git a/apps/src/lib/config/ethereum_bridge/mod.rs b/apps/src/lib/config/ethereum_bridge/mod.rs new file mode 100644 index 0000000000..370e1150a2 --- /dev/null +++ b/apps/src/lib/config/ethereum_bridge/mod.rs @@ -0,0 +1 @@ +pub mod ledger; diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index ebad28f311..c5638cf071 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -6,12 +6,20 @@ use std::path::Path; use borsh::{BorshDeserialize, BorshSerialize}; use derivative::Derivative; +use namada::ledger::eth_bridge::EthereumBridgeConfig; +#[cfg(feature = "dev")] +use namada::ledger::eth_bridge::{Contracts, UpgradeableContract}; use namada::ledger::governance::parameters::GovParams; use namada::ledger::parameters::EpochDuration; use namada::ledger::pos::{GenesisValidator, PosParams}; +#[cfg(feature = "dev")] +use namada::types::address::wnam; use namada::types::address::Address; #[cfg(not(feature = "dev"))] use namada::types::chain::ChainId; +use namada::types::chain::ProposalBytes; +#[cfg(feature = "dev")] +use namada::types::ethereum_events::EthAddress; use namada::types::key::dkg_session_keys::DkgPublicKey; use namada::types::key::*; use namada::types::time::{DateTimeUtc, DurationSecs}; @@ -32,6 +40,7 @@ pub mod genesis_config { use namada::ledger::parameters::EpochDuration; use namada::ledger::pos::{GenesisValidator, PosParams}; use namada::types::address::Address; + use namada::types::chain::ProposalBytes; use namada::types::key::dkg_session_keys::DkgPublicKey; use namada::types::key::*; use namada::types::time::Rfc3339String; @@ -41,8 +50,8 @@ pub mod genesis_config { use thiserror::Error; use super::{ - EstablishedAccount, Genesis, ImplicitAccount, Parameters, TokenAccount, - Validator, + EstablishedAccount, EthereumBridgeConfig, Genesis, ImplicitAccount, + Parameters, TokenAccount, Validator, }; use crate::cli; @@ -123,6 +132,8 @@ pub mod genesis_config { pub pos_params: PosParamsConfig, // Governance parameters pub gov_params: GovernanceParamsConfig, + // Ethereum bridge config + pub ethereum_bridge_params: Option, // Wasm definitions pub wasm: HashMap, } @@ -162,6 +173,10 @@ pub mod genesis_config { pub struct ValidatorConfig { // Public key for consensus. (default: generate) pub consensus_public_key: Option, + // Public key (cold) for eth governance. (default: generate) + pub eth_cold_key: Option, + // Public key (hot) for eth bridge. (default: generate) + pub eth_hot_key: Option, // Public key for validator account. (default: generate) pub account_public_key: Option, // Public protocol signing key for validator account. (default: @@ -222,17 +237,30 @@ pub mod genesis_config { #[derive(Clone, Debug, Deserialize, Serialize)] pub struct ParametersConfig { - // Minimum number of blocks per epoch. + /// Max payload size, in bytes, for a tx batch proposal. + /// + /// Block proposers may never return a `PrepareProposal` + /// response containing `txs` with a byte length greater + /// than whatever is configured through this parameter. 
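+        ///
+        /// For example, with a `max_proposal_bytes` of 6 MiB (a purely
+        /// illustrative figure), no `PrepareProposal` response may carry
+        /// more than 6 MiB of transaction data.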
+ /// + /// Note that this parameter's value will always be strictly + /// smaller than a Tendermint block's `MaxBytes` consensus + /// parameter. Currently, we hard cap `max_proposal_bytes` + /// at 90 MiB in Namada, which leaves at least 10 MiB of + /// room for header data, evidence and protobuf + /// serialization overhead in Tendermint blocks. + pub max_proposal_bytes: ProposalBytes, + /// Minimum number of blocks per epoch. // XXX: u64 doesn't work with toml-rs! pub min_num_of_blocks: u64, - // Maximum duration per block (in seconds). + /// Maximum duration per block (in seconds). // TODO: this is i64 because datetime wants it pub max_expected_time_per_block: i64, - // Hashes of whitelisted vps array. `None` value or an empty array - // disables whitelisting. + /// Hashes of whitelisted vps array. `None` value or an empty array + /// disables whitelisting. pub vp_whitelist: Option>, - // Hashes of whitelisted txs array. `None` value or an empty array - // disables whitelisting. + /// Hashes of whitelisted txs array. `None` value or an empty array + /// disables whitelisting. pub tx_whitelist: Option>, /// Filename of implicit accounts validity predicate WASM code pub implicit_vp: String, @@ -303,6 +331,18 @@ pub mod genesis_config { .unwrap() .to_public_key() .unwrap(), + eth_cold_key: config + .eth_cold_key + .as_ref() + .unwrap() + .to_public_key() + .unwrap(), + eth_hot_key: config + .eth_hot_key + .as_ref() + .unwrap() + .to_public_key() + .unwrap(), commission_rate: config .commission_rate .and_then(|rate| { @@ -503,6 +543,7 @@ pub mod genesis_config { pos_params, gov_params, wasm, + ethereum_bridge_params, } = config; let native_token = Address::decode( @@ -574,6 +615,7 @@ pub mod genesis_config { parameters.max_expected_time_per_block, ) .into(), + max_proposal_bytes: parameters.max_proposal_bytes, vp_whitelist: parameters.vp_whitelist.unwrap_or_default(), tx_whitelist: parameters.tx_whitelist.unwrap_or_default(), implicit_vp_code_path, @@ -637,6 +679,7 @@ pub mod genesis_config { parameters, pos_params, gov_params, + ethereum_bridge_params, }; genesis.init(); genesis @@ -685,6 +728,8 @@ pub struct Genesis { pub parameters: Parameters, pub pos_params: PosParams, pub gov_params: GovParams, + // Ethereum bridge config + pub ethereum_bridge_params: Option, } impl Genesis { @@ -796,6 +841,8 @@ pub struct ImplicitAccount { BorshDeserialize, )] pub struct Parameters { + // Max payload size, in bytes, for a tx batch proposal. + pub max_proposal_bytes: ProposalBytes, /// Epoch duration pub epoch_duration: EpochDuration, /// Maximum expected time per block @@ -843,13 +890,24 @@ pub fn genesis() -> Genesis { // `tests::gen_genesis_validator` below. 
let consensus_keypair = wallet::defaults::validator_keypair(); let account_keypair = wallet::defaults::validator_keypair(); + let secp_eth_cold_keypair = secp256k1::SecretKey::try_from_slice(&[ + 90, 83, 107, 155, 193, 251, 120, 27, 76, 1, 188, 8, 116, 121, 90, 99, + 65, 17, 187, 6, 238, 141, 63, 188, 76, 38, 102, 7, 47, 185, 28, 52, + ]) + .unwrap(); + + let eth_cold_keypair = + common::SecretKey::try_from_sk(&secp_eth_cold_keypair).unwrap(); let address = wallet::defaults::validator_address(); - let (protocol_keypair, dkg_keypair) = wallet::defaults::validator_keys(); + let (protocol_keypair, eth_bridge_keypair, dkg_keypair) = + wallet::defaults::validator_keys(); let validator = Validator { pos_data: GenesisValidator { address, tokens: token::Amount::whole(200_000), consensus_key: consensus_keypair.ref_to(), + eth_cold_key: eth_cold_keypair.ref_to(), + eth_hot_key: eth_bridge_keypair.ref_to(), commission_rate: dec!(0.05), max_commission_rate_change: dec!(0.01), }, @@ -867,6 +925,7 @@ pub fn genesis() -> Genesis { min_duration: namada::types::time::Duration::seconds(600).into(), }, max_expected_time_per_block: namada::types::time::DurationSecs(30), + max_proposal_bytes: Default::default(), vp_whitelist: vec![], tx_whitelist: vec![], implicit_vp_code_path: vp_implicit_path.into(), @@ -951,6 +1010,20 @@ pub fn genesis() -> Genesis { parameters, pos_params: PosParams::default(), gov_params: GovParams::default(), + ethereum_bridge_params: Some(EthereumBridgeConfig { + min_confirmations: Default::default(), + contracts: Contracts { + native_erc20: wnam(), + bridge: UpgradeableContract { + address: EthAddress([0; 20]), + version: Default::default(), + }, + governance: UpgradeableContract { + address: EthAddress([1; 20]), + version: Default::default(), + }, + }, + }), native_token: address::nam(), } } @@ -974,11 +1047,30 @@ pub mod tests { let keypair: common::SecretKey = ed25519::SigScheme::generate(&mut rng).try_to_sk().unwrap(); let kp_arr = keypair.try_to_vec().unwrap(); - let (protocol_keypair, dkg_keypair) = + let (protocol_keypair, _eth_hot_bridge_keypair, dkg_keypair) = wallet::defaults::validator_keys(); + + // TODO: derive validator eth address from an eth keypair + let eth_cold_gov_keypair: common::SecretKey = + secp256k1::SigScheme::generate(&mut rng) + .try_to_sk() + .unwrap(); + let eth_hot_bridge_keypair: common::SecretKey = + secp256k1::SigScheme::generate(&mut rng) + .try_to_sk() + .unwrap(); + println!("address: {}", address); println!("keypair: {:?}", kp_arr); println!("protocol_keypair: {:?}", protocol_keypair); println!("dkg_keypair: {:?}", dkg_keypair.try_to_vec().unwrap()); + println!( + "eth_cold_gov_keypair: {:?}", + eth_cold_gov_keypair.try_to_vec().unwrap() + ); + println!( + "eth_hot_bridge_keypair: {:?}", + eth_hot_bridge_keypair.try_to_vec().unwrap() + ); } } diff --git a/apps/src/lib/config/mod.rs b/apps/src/lib/config/mod.rs index 811289c790..e89aced251 100644 --- a/apps/src/lib/config/mod.rs +++ b/apps/src/lib/config/mod.rs @@ -1,5 +1,6 @@ //! 
Node and client configuration +pub mod ethereum_bridge; pub mod genesis; pub mod global; pub mod utils; @@ -73,6 +74,7 @@ pub struct Ledger { pub chain_id: ChainId, pub shell: Shell, pub tendermint: Tendermint, + pub ethereum_bridge: ethereum_bridge::ledger::Config, } #[derive(Clone, Debug, Serialize, Deserialize)] @@ -165,6 +167,7 @@ impl Ledger { ), instrumentation_namespace: "namadan_tm".to_string(), }, + ethereum_bridge: ethereum_bridge::ledger::Config::default(), } } diff --git a/apps/src/lib/node/ledger/ethereum_node/events.rs b/apps/src/lib/node/ledger/ethereum_node/events.rs new file mode 100644 index 0000000000..afd8e086fe --- /dev/null +++ b/apps/src/lib/node/ledger/ethereum_node/events.rs @@ -0,0 +1,1069 @@ +pub mod signatures { + pub const TRANSFER_TO_NAMADA_SIG: &str = + "TransferToNamada(uint256,(address,uint256,string)[],uint256)"; + pub const TRANSFER_TO_ETHEREUM_SIG: &str = + "TransferToErc(uint256,address[],address[],uint256[],uint32)"; + pub const VALIDATOR_SET_UPDATE_SIG: &str = + "ValidatorSetUpdate(uint256,bytes32,bytes32)"; + pub const NEW_CONTRACT_SIG: &str = "NewContract(string,address)"; + pub const UPGRADED_CONTRACT_SIG: &str = "UpgradedContract(string,address)"; + pub const UPDATE_BRIDGE_WHITELIST_SIG: &str = + "UpdateBridgeWhiteList(uint256,address[],uint256[])"; + pub const SIGNATURES: [&str; 6] = [ + TRANSFER_TO_NAMADA_SIG, + TRANSFER_TO_ETHEREUM_SIG, + VALIDATOR_SET_UPDATE_SIG, + NEW_CONTRACT_SIG, + UPGRADED_CONTRACT_SIG, + UPDATE_BRIDGE_WHITELIST_SIG, + ]; + + /// Used to determine which smart contract address + /// a signature belongs to + pub enum SigType { + Bridge, + Governance, + } + + impl From<&str> for SigType { + fn from(sig: &str) -> Self { + match sig { + TRANSFER_TO_NAMADA_SIG | TRANSFER_TO_ETHEREUM_SIG => { + SigType::Bridge + } + _ => SigType::Governance, + } + } + } +} + +pub mod eth_events { + use std::convert::TryInto; + use std::fmt::Debug; + use std::str::FromStr; + + use ethabi::decode; + #[cfg(test)] + use ethabi::encode; + use ethabi::param_type::ParamType; + use ethabi::token::Token; + use namada::types::address::Address; + use namada::types::ethereum_events::{ + EthAddress, EthereumEvent, TokenWhitelist, TransferToEthereum, + TransferToNamada, Uint, + }; + use namada::types::keccak::KeccakHash; + use namada::types::token::Amount; + use num256::Uint256; + use thiserror::Error; + + pub use super::signatures; + + #[derive(Error, Debug)] + pub enum Error { + #[error("Could not decode Ethereum event: {0}")] + Decode(String), + } + + pub type Result = std::result::Result; + + #[derive(Clone, Debug, PartialEq)] + /// An event waiting for a certain number of confirmations + /// before being sent to the ledger + pub(in super::super) struct PendingEvent { + /// number of confirmations to consider this event finalized + confirmations: Uint256, + /// the block height from which this event originated + block_height: Uint256, + /// the event itself + pub event: EthereumEvent, + } + + /// Event emitted with the validator set changes + #[derive(Clone, Debug, PartialEq)] + pub struct ValidatorSetUpdate { + /// A monotonically increasing nonce + nonce: Uint, + /// Hash of the validators in the bridge contract + bridge_validator_hash: KeccakHash, + /// Hash of the validators in the governance contract + governance_validator_hash: KeccakHash, + } + + /// Event indicating a new smart contract has been + /// deployed or upgraded on Ethereum + #[derive(Clone, Debug, PartialEq)] + pub(in super::super) struct ChangedContract { + /// Name of the contract + 
pub name: String,
+        /// Address of the contract on Ethereum
+        pub address: EthAddress,
+    }
+
+    /// Event for whitelisting new tokens and their
+    /// rate limits
+    #[derive(Clone, Debug, PartialEq)]
+    struct UpdateBridgeWhitelist {
+        /// A monotonically increasing nonce
+        nonce: Uint,
+        /// Tokens to be allowed to be transferred across the bridge
+        whitelist: Vec<TokenWhitelist>,
+    }
+
+    impl PendingEvent {
+        /// Decodes bytes into an [`EthereumEvent`] based on the signature.
+        /// This is turned into a [`PendingEvent`] along with the block
+        /// height passed in here.
+        ///
+        /// If the event contains a confirmations field,
+        /// this is passed to the corresponding [`PendingEvent`] field,
+        /// otherwise a default is used.
+        pub fn decode(
+            signature: &str,
+            block_height: Uint256,
+            data: &[u8],
+            min_confirmations: Uint256,
+        ) -> Result<Self> {
+            match signature {
+                signatures::TRANSFER_TO_NAMADA_SIG => {
+                    RawTransfersToNamada::decode(data).map(|txs| PendingEvent {
+                        confirmations: min_confirmations
+                            .max(txs.confirmations.into()),
+                        block_height,
+                        event: EthereumEvent::TransfersToNamada {
+                            nonce: txs.nonce,
+                            transfers: txs.transfers,
+                        },
+                    })
+                }
+                signatures::TRANSFER_TO_ETHEREUM_SIG => {
+                    RawTransfersToEthereum::decode(data).map(|txs| {
+                        PendingEvent {
+                            confirmations: min_confirmations
+                                .max(txs.confirmations.into()),
+                            block_height,
+                            event: EthereumEvent::TransfersToEthereum {
+                                nonce: txs.nonce,
+                                transfers: txs.transfers,
+                            },
+                        }
+                    })
+                }
+                signatures::VALIDATOR_SET_UPDATE_SIG => {
+                    ValidatorSetUpdate::decode(data).map(
+                        |ValidatorSetUpdate {
+                             nonce,
+                             bridge_validator_hash,
+                             governance_validator_hash,
+                         }| PendingEvent {
+                            confirmations: min_confirmations,
+                            block_height,
+                            event: EthereumEvent::ValidatorSetUpdate {
+                                nonce,
+                                bridge_validator_hash,
+                                governance_validator_hash,
+                            },
+                        },
+                    )
+                }
+                signatures::NEW_CONTRACT_SIG => ChangedContract::decode(data)
+                    .map(|ChangedContract { name, address }| PendingEvent {
+                        confirmations: min_confirmations,
+                        block_height,
+                        event: EthereumEvent::NewContract { name, address },
+                    }),
+                signatures::UPGRADED_CONTRACT_SIG => ChangedContract::decode(
+                    data,
+                )
+                .map(|ChangedContract { name, address }| PendingEvent {
+                    confirmations: min_confirmations,
+                    block_height,
+                    event: EthereumEvent::UpgradedContract { name, address },
+                }),
+                signatures::UPDATE_BRIDGE_WHITELIST_SIG => {
+                    UpdateBridgeWhitelist::decode(data).map(
+                        |UpdateBridgeWhitelist { nonce, whitelist }| {
+                            PendingEvent {
+                                confirmations: min_confirmations,
+                                block_height,
+                                event: EthereumEvent::UpdateBridgeWhitelist {
+                                    nonce,
+                                    whitelist,
+                                },
+                            }
+                        },
+                    )
+                }
+                _ => unreachable!(),
+            }
+        }
+
+        /// Check if the minimum number of confirmations has been
+        /// reached at the input block height.
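+        ///
+        /// For example, an event originating at block 100 that requires 25
+        /// confirmations is considered confirmed at input height 125 or
+        /// above, since `confirmations <= height - block_height` then holds.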
+ pub fn is_confirmed(&self, height: &Uint256) -> bool { + self.confirmations <= height.clone() - self.block_height.clone() + } + } + + /// A batch of [`TransferToNamada`] from an Ethereum event + #[derive(Clone, Debug, PartialEq)] + pub(super) struct RawTransfersToNamada { + /// A list of transfers + pub transfers: Vec, + /// A monotonically increasing nonce + #[allow(dead_code)] + pub nonce: Uint, + /// The number of confirmations needed to consider this batch + /// finalized + pub confirmations: u32, + } + + /// A batch of [`TransferToNamada`] from an Ethereum event + #[derive(Clone, Debug, PartialEq)] + pub(in super::super) struct RawTransfersToEthereum { + /// A list of transfers + pub transfers: Vec, + /// A monotonically increasing nonce + #[allow(dead_code)] + pub nonce: Uint, + /// The number of confirmations needed to consider this batch + /// finalized + pub confirmations: u32, + } + + impl RawTransfersToNamada { + /// Parse ABI serialized data from an Ethereum event into + /// an instance of [`RawTransfersToNamada`] + fn decode(data: &[u8]) -> Result { + let [nonce, transfers, confs]: [Token; 3] = decode( + &[ + ParamType::Uint(256), + ParamType::Array(Box::new(ParamType::Tuple(vec![ + ParamType::Address, + ParamType::Uint(256), + ParamType::String, + ]))), + ParamType::Uint(256), + ], + data, + ) + .map_err(|err| Error::Decode(format!("{:#?}", err)))? + .try_into() + .map_err(|error| { + Error::Decode(format!( + "TransferToNamada signature should contain three types: \ + {:?}", + error + )) + })?; + + Ok(Self { + transfers: transfers.parse_transfer_to_namada_array()?, + nonce: nonce.parse_uint256()?, + confirmations: confs.parse_u32()?, + }) + } + + /// Serialize an instance [`RawTransfersToNamada`] using Ethereum's + /// ABI serialization scheme. + #[cfg(test)] + fn encode(self) -> Vec { + let RawTransfersToNamada { + transfers, + nonce, + confirmations, + } = self; + + let transfers = transfers + .into_iter() + .map( + |TransferToNamada { + asset, + receiver, + amount, + }| { + Token::Tuple(vec![ + Token::Address(asset.0.into()), + Token::Uint(u64::from(amount).into()), + Token::String(receiver.to_string()), + ]) + }, + ) + .collect(); + + encode(&[ + Token::Uint(nonce.into()), + Token::Array(transfers), + Token::Uint(confirmations.into()), + ]) + } + } + + impl RawTransfersToEthereum { + /// Parse ABI serialized data from an Ethereum event into + /// an instance of [`RawTransfersToEthereum`] + fn decode(data: &[u8]) -> Result { + let [nonce, assets, receivers, amounts, confs]: [Token; 5] = + decode( + &[ + ParamType::Uint(256), + ParamType::Array(Box::new(ParamType::Address)), + ParamType::Array(Box::new(ParamType::Address)), + ParamType::Array(Box::new(ParamType::Uint(256))), + ParamType::Uint(32), + ], + data, + ) + .map_err(|err| Error::Decode(format!("{:?}", err)))? 
+            .try_into()
+            .map_err(|_| {
+                Error::Decode(
+                    "TransferToERC signature should contain five types"
+                        .to_string(),
+                )
+            })?;
+
+            let assets = assets.parse_eth_address_array()?;
+            let receivers = receivers.parse_eth_address_array()?;
+            let amounts = amounts.parse_amount_array()?;
+            if assets.len() != amounts.len() {
+                Err(Error::Decode(
+                    "Number of source addresses is different from number of \
+                     transfer amounts"
+                        .into(),
+                ))
+            } else if receivers.len() != assets.len() {
+                Err(Error::Decode(
+                    "Number of source addresses is different from number of \
+                     target addresses"
+                        .into(),
+                ))
+            } else {
+                Ok(Self {
+                    transfers: assets
+                        .into_iter()
+                        .zip(receivers.into_iter())
+                        .zip(amounts.into_iter())
+                        .map(|((asset, receiver), amount)| TransferToEthereum {
+                            amount,
+                            asset,
+                            receiver,
+                        })
+                        .collect(),
+                    nonce: nonce.parse_uint256()?,
+                    confirmations: confs.parse_u32()?,
+                })
+            }
+        }
+
+        /// Serialize an instance of [`RawTransfersToEthereum`] using
+        /// Ethereum's ABI serialization scheme.
+        #[cfg(test)]
+        pub fn encode(self) -> Vec<u8> {
+            let RawTransfersToEthereum {
+                transfers,
+                nonce,
+                confirmations,
+            } = self;
+            let amounts: Vec<Token> = transfers
+                .iter()
+                .map(|TransferToEthereum { amount, .. }| {
+                    Token::Uint(u64::from(*amount).into())
+                })
+                .collect();
+            let (assets, receivers): (Vec<Token>, Vec<Token>) = transfers
+                .into_iter()
+                .map(
+                    |TransferToEthereum {
+                         asset, receiver, ..
+                     }| {
+                        (
+                            Token::Address(asset.0.into()),
+                            Token::Address(receiver.0.into()),
+                        )
+                    },
+                )
+                .unzip();
+
+            encode(&[
+                Token::Uint(nonce.into()),
+                Token::Array(assets),
+                Token::Array(receivers),
+                Token::Array(amounts),
+                Token::Uint(confirmations.into()),
+            ])
+        }
+    }
+
+    impl ValidatorSetUpdate {
+        /// Parse ABI serialized data from an Ethereum event into
+        /// an instance of [`ValidatorSetUpdate`]
+        fn decode(data: &[u8]) -> Result<Self> {
+            let [nonce, bridge_validator_hash, governance_validator_hash]:
+                [Token; 3] = decode(
+                &[
+                    ParamType::Uint(256),
+                    ParamType::FixedBytes(32),
+                    ParamType::FixedBytes(32),
+                ],
+                data,
+            )
+            .map_err(|err| Error::Decode(format!("{:?}", err)))?
+            .try_into()
+            .map_err(|_| {
+                Error::Decode(
+                    "ValidatorSetUpdate signature should contain three types"
+                        .into(),
+                )
+            })?;
+
+            Ok(Self {
+                nonce: nonce.parse_uint256()?,
+                bridge_validator_hash: bridge_validator_hash.parse_keccak()?,
+                governance_validator_hash: governance_validator_hash
+                    .parse_keccak()?,
+            })
+        }
+
+        /// Serialize an instance of [`ValidatorSetUpdate`] using Ethereum's
+        /// ABI serialization scheme.
+        #[cfg(test)]
+        fn encode(self) -> Vec<u8> {
+            let ValidatorSetUpdate {
+                nonce,
+                bridge_validator_hash,
+                governance_validator_hash,
+            } = self;
+
+            encode(&[
+                Token::Uint(nonce.into()),
+                Token::FixedBytes(bridge_validator_hash.0.into()),
+                Token::FixedBytes(governance_validator_hash.0.into()),
+            ])
+        }
+    }
+
+    impl ChangedContract {
+        /// Parse ABI serialized data from an Ethereum event into
+        /// an instance of [`ChangedContract`]
+        fn decode(data: &[u8]) -> Result<Self> {
+            let [name, address]: [Token; 2] =
+                decode(&[ParamType::String, ParamType::Address], data)
+                    .map_err(|err| Error::Decode(format!("{:?}", err)))?
+                    .try_into()
+                    .map_err(|_| {
+                        Error::Decode(
+                            "ContractUpdate signature should contain two types"
+                                .into(),
+                        )
+                    })?;
+
+            Ok(Self {
+                name: name.parse_string()?,
+                address: address.parse_eth_address()?,
+            })
+        }
+
+        /// Serialize an instance of [`ChangedContract`] using Ethereum's
+        /// ABI serialization scheme.
+ #[cfg(test)] + pub fn encode(self) -> Vec { + let ChangedContract { name, address } = self; + encode(&[Token::String(name), Token::Address(address.0.into())]) + } + } + + impl UpdateBridgeWhitelist { + /// Parse ABI serialized data from an Ethereum event into + /// an instance of [`UpdateBridgeWhitelist`] + fn decode(data: &[u8]) -> Result { + let [nonce, tokens, caps]: [Token; 3] = decode( + &[ + ParamType::Uint(256), + ParamType::Array(Box::new(ParamType::Address)), + ParamType::Array(Box::new(ParamType::Uint(256))), + ], + data, + ) + .map_err(|err| Error::Decode(format!("{:?}", err)))? + .try_into() + .map_err(|_| { + Error::Decode( + "UpdatedBridgeWhitelist signature should contain three \ + types" + .into(), + ) + })?; + + let tokens = tokens.parse_eth_address_array()?; + let caps = caps.parse_amount_array()?; + if tokens.len() != caps.len() { + Err(Error::Decode( + "UpdatedBridgeWhitelist received different number of \ + token address and token caps" + .into(), + )) + } else { + Ok(Self { + nonce: nonce.parse_uint256()?, + whitelist: tokens + .into_iter() + .zip(caps.into_iter()) + .map(|(token, cap)| TokenWhitelist { token, cap }) + .collect(), + }) + } + } + + /// Serialize an instance [`UpdateBridgeWhitelist`] using Ethereum's + /// ABI serialization scheme. + #[cfg(test)] + fn encode(self) -> Vec { + let UpdateBridgeWhitelist { nonce, whitelist } = self; + + let (tokens, caps): (Vec, Vec) = whitelist + .into_iter() + .map(|TokenWhitelist { token, cap }| { + ( + Token::Address(token.0.into()), + Token::Uint(u64::from(cap).into()), + ) + }) + .unzip(); + encode(&[ + Token::Uint(nonce.into()), + Token::Array(tokens), + Token::Array(caps), + ]) + } + } + + /// Trait to add parsing methods to `Token`, which is a + /// foreign type + trait Parse { + fn parse_eth_address(self) -> Result; + fn parse_address(self) -> Result
; + fn parse_amount(self) -> Result; + fn parse_u32(self) -> Result; + fn parse_uint256(self) -> Result; + fn parse_bool(self) -> Result; + fn parse_string(self) -> Result; + fn parse_keccak(self) -> Result; + fn parse_amount_array(self) -> Result>; + fn parse_eth_address_array(self) -> Result>; + fn parse_address_array(self) -> Result>; + fn parse_string_array(self) -> Result>; + fn parse_transfer_to_namada_array( + self, + ) -> Result>; + fn parse_transfer_to_namada(self) -> Result; + } + + impl Parse for Token { + fn parse_eth_address(self) -> Result { + if let Token::Address(addr) = self { + Ok(EthAddress(addr.0)) + } else { + Err(Error::Decode(format!( + "Expected type `Address`, got {:?}", + self + ))) + } + } + + fn parse_address(self) -> Result
{ + if let Token::String(addr) = self { + Address::from_str(&addr) + .map_err(|err| Error::Decode(format!("{:?}", err))) + } else { + Err(Error::Decode(format!( + "Expected type `String`, got {:?}", + self + ))) + } + } + + fn parse_amount(self) -> Result { + if let Token::Uint(amount) = self { + Ok(Amount::from(amount.as_u64())) + } else { + Err(Error::Decode(format!( + "Expected type `Uint`, got {:?}", + self + ))) + } + } + + fn parse_u32(self) -> Result { + if let Token::Uint(amount) = self { + Ok(amount.as_u32()) + } else { + Err(Error::Decode(format!( + "Expected type `Uint`, got {:?}", + self + ))) + } + } + + fn parse_uint256(self) -> Result { + if let Token::Uint(uint) = self { + Ok(uint.into()) + } else { + Err(Error::Decode(format!( + "Expected type `Uint`, got {:?}", + self + ))) + } + } + + fn parse_bool(self) -> Result { + if let Token::Bool(b) = self { + Ok(b) + } else { + Err(Error::Decode(format!( + "Expected type `bool`, got {:?}", + self + ))) + } + } + + fn parse_string(self) -> Result { + if let Token::String(string) = self { + Ok(string) + } else { + Err(Error::Decode(format!( + "Expected type `String`, got {:?}", + self + ))) + } + } + + fn parse_keccak(self) -> Result { + if let Token::FixedBytes(bytes) = self { + let bytes = bytes.try_into().map_err(|_| { + Error::Decode("Expect 32 bytes for a Keccak hash".into()) + })?; + Ok(KeccakHash(bytes)) + } else { + Err(Error::Decode(format!( + "Expected type `FixedBytes`, got {:?}", + self + ))) + } + } + + fn parse_amount_array(self) -> Result> { + let array = if let Token::Array(array) = self { + array + } else { + return Err(Error::Decode(format!( + "Expected type `Array`, got {:?}", + self + ))); + }; + let mut amounts = vec![]; + for token in array.into_iter() { + let amount = token.parse_amount()?; + amounts.push(amount); + } + Ok(amounts) + } + + fn parse_eth_address_array(self) -> Result> { + let array = if let Token::Array(array) = self { + array + } else { + return Err(Error::Decode(format!( + "Expected type `Array`, got {:?}", + self + ))); + }; + let mut addrs = vec![]; + for token in array.into_iter() { + let addr = token.parse_eth_address()?; + addrs.push(addr); + } + Ok(addrs) + } + + fn parse_transfer_to_namada_array( + self, + ) -> Result> { + let array = if let Token::Array(array) = self { + array + } else { + return Err(Error::Decode(format!( + "Expected type `Array`, got {:?}", + self + ))); + }; + let mut transfers = vec![]; + for token in array.into_iter() { + let transfer = token.parse_transfer_to_namada()?; + transfers.push(transfer); + } + Ok(transfers) + } + + fn parse_transfer_to_namada(self) -> Result { + if let Token::Tuple(mut items) = self { + let asset = items.remove(0).parse_eth_address()?; + let amount = items.remove(0).parse_amount()?; + let receiver = items.remove(0).parse_address()?; + Ok(TransferToNamada { + asset, + amount, + receiver, + }) + } else { + Err(Error::Decode(format!( + "Expected type `Tuple`, got {:?}", + self + ))) + } + } + + fn parse_address_array(self) -> Result> { + let array = if let Token::Array(array) = self { + array + } else { + return Err(Error::Decode(format!( + "Expected type `Array`, got {:?}", + self + ))); + }; + let mut addrs = vec![]; + for token in array.into_iter() { + let addr = token.parse_address()?; + addrs.push(addr); + } + Ok(addrs) + } + + fn parse_string_array(self) -> Result> { + let array = if let Token::Array(array) = self { + array + } else { + return Err(Error::Decode(format!( + "Expected type `Array`, got {:?}", + self + ))); + }; + let 
mut strings = vec![]; + for token in array.into_iter() { + let string = token.parse_string()?; + strings.push(string); + } + Ok(strings) + } + } + + #[cfg(test)] + mod test_events { + use assert_matches::assert_matches; + + use super::*; + + #[test] + fn test_transfer_to_namada_decode() { + let data: Vec = vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 95, 189, 178, 49, 86, 120, 175, 236, 179, 103, 240, + 50, 217, 63, 100, 47, 100, 24, 10, 163, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 84, 97, 116, 101, 115, 116, 49, 118, 52, 101, 104, + 103, 119, 51, 54, 120, 117, 117, 110, 119, 100, 54, 57, 56, 57, + 112, 114, 119, 100, 102, 107, 120, 113, 109, 110, 118, 115, + 102, 106, 120, 115, 54, 110, 118, 118, 54, 120, 120, 117, 99, + 114, 115, 51, 102, 51, 120, 99, 109, 110, 115, 51, 102, 99, + 120, 100, 122, 114, 118, 118, 122, 57, 120, 118, 101, 114, 122, + 118, 122, 114, 53, 54, 108, 101, 56, 102, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + ]; + + let raw = RawTransfersToNamada::decode(&data); + + let raw = raw.unwrap(); + assert_eq!( + raw.transfers, + vec![TransferToNamada { + amount: Amount::from(100), + asset: EthAddress::from_str("0x5FbDB2315678afecb367f032d93F642f64180aa3").unwrap(), + receiver: Address::decode("atest1v4ehgw36xuunwd6989prwdfkxqmnvsfjxs6nvv6xxucrs3f3xcmns3fcxdzrvvz9xverzvzr56le8f").unwrap(), + }] + ) + } + + /// Test that for Ethereum events for which a custom number of + /// confirmations may be specified, if a value lower than the + /// protocol-specified minimum confirmations is attempted to be used, + /// then the protocol-specified minimum confirmations is used instead. + #[test] + fn test_min_confirmations_enforced() -> Result<()> { + let arbitrary_block_height: Uint256 = 123u64.into(); + let min_confirmations: Uint256 = 100u64.into(); + let lower_than_min_confirmations = 5; + + let (sig, event) = ( + signatures::TRANSFER_TO_NAMADA_SIG, + RawTransfersToNamada { + transfers: vec![], + nonce: 0.into(), + confirmations: lower_than_min_confirmations, + }, + ); + let data = event.encode(); + let pending_event = PendingEvent::decode( + sig, + arbitrary_block_height.clone(), + &data, + min_confirmations.clone(), + )?; + + assert_matches!(pending_event, PendingEvent { confirmations, .. } if confirmations == min_confirmations); + + let (sig, event) = ( + signatures::TRANSFER_TO_ETHEREUM_SIG, + RawTransfersToEthereum { + transfers: vec![], + nonce: 0.into(), + confirmations: lower_than_min_confirmations, + }, + ); + let data = event.encode(); + let pending_event = PendingEvent::decode( + sig, + arbitrary_block_height, + &data, + min_confirmations.clone(), + )?; + + assert_matches!(pending_event, PendingEvent { confirmations, .. 
} if confirmations == min_confirmations); + + Ok(()) + } + + /// Test that for Ethereum events for which a custom number of + /// confirmations may be specified, the custom number is used if it is + /// at least the protocol-specified minimum confirmations. + #[test] + fn test_custom_confirmations_used() { + let arbitrary_block_height: Uint256 = 123u64.into(); + let min_confirmations: Uint256 = 100u64.into(); + let higher_than_min_confirmations = 200; + + let (sig, event) = ( + signatures::TRANSFER_TO_NAMADA_SIG, + RawTransfersToNamada { + transfers: vec![], + nonce: 0.into(), + confirmations: higher_than_min_confirmations, + }, + ); + let data = event.encode(); + let pending_event = PendingEvent::decode( + sig, + arbitrary_block_height.clone(), + &data, + min_confirmations.clone(), + ) + .unwrap(); + + assert_matches!(pending_event, PendingEvent { confirmations, .. } if confirmations == higher_than_min_confirmations.into()); + + let (sig, event) = ( + signatures::TRANSFER_TO_ETHEREUM_SIG, + RawTransfersToEthereum { + transfers: vec![], + nonce: 0.into(), + confirmations: higher_than_min_confirmations, + }, + ); + let data = event.encode(); + let pending_event = PendingEvent::decode( + sig, + arbitrary_block_height, + &data, + min_confirmations, + ) + .unwrap(); + + assert_matches!(pending_event, PendingEvent { confirmations, .. } if confirmations == higher_than_min_confirmations.into()); + } + + /// For each of the basic types, test that roundtrip + /// encoding - decoding is a no-op + #[test] + fn test_round_trips() { + let erc = EthAddress([1; 20]); + let address = Address::from_str("atest1v4ehgw36gep5ysecxq6nyv3jg3zygv3e89qn2vp48pryxsf4xpznvve5gvmy23fs89pryvf5a6ht90") + .expect("Test failed"); + let amount = Amount::from(42u64); + let confs = 50u32; + let uint = Uint::from(42u64); + let boolean = true; + let string = String::from("test"); + let keccak = KeccakHash([2; 32]); + + let [token]: [Token; 1] = decode( + &[ParamType::Address], + encode(&[Token::Address(erc.0.into())]).as_slice(), + ) + .expect("Test failed") + .try_into() + .expect("Test failed"); + assert_eq!(token.parse_eth_address().expect("Test failed"), erc); + + let [token]: [Token; 1] = decode( + &[ParamType::String], + encode(&[Token::String(address.to_string())]).as_slice(), + ) + .expect("Test failed") + .try_into() + .expect("Test failed"); + assert_eq!(token.parse_address().expect("Test failed"), address); + + let [token]: [Token; 1] = decode( + &[ParamType::Uint(64)], + encode(&[Token::Uint(u64::from(amount).into())]).as_slice(), + ) + .expect("Test failed") + .try_into() + .expect("Test failed"); + assert_eq!(token.parse_amount().expect("Test failed"), amount); + + let [token]: [Token; 1] = decode( + &[ParamType::Uint(32)], + encode(&[Token::Uint(confs.into())]).as_slice(), + ) + .expect("Test failed") + .try_into() + .expect("Test failed"); + assert_eq!(token.parse_u32().expect("Test failed"), confs); + + let [token]: [Token; 1] = decode( + &[ParamType::Uint(256)], + encode(&[Token::Uint(uint.clone().into())]).as_slice(), + ) + .expect("Test failed") + .try_into() + .expect("Test failed"); + assert_eq!(token.parse_uint256().expect("Test failed"), uint); + + let [token]: [Token; 1] = decode( + &[ParamType::Bool], + encode(&[Token::Bool(boolean)]).as_slice(), + ) + .expect("Test failed") + .try_into() + .expect("Test failed"); + assert_eq!(token.parse_bool().expect("Test failed"), boolean); + + let [token]: [Token; 1] = decode( + &[ParamType::String], + encode(&[Token::String(string.clone())]).as_slice(), + ) + 
.expect("Test failed") + .try_into() + .expect("Test failed"); + assert_eq!(token.parse_string().expect("Test failed"), string); + + let [token]: [Token; 1] = decode( + &[ParamType::FixedBytes(32)], + encode(&[Token::FixedBytes(keccak.0.to_vec())]).as_slice(), + ) + .expect("Test failed") + .try_into() + .expect("Test failed"); + assert_eq!(token.parse_keccak().expect("Test failed"), keccak); + } + + /// Test that serialization and deserialization of + /// complex composite types is a no-op + #[test] + fn test_complex_round_trips() { + let address = Address::from_str("atest1v4ehgw36gep5ysecxq6nyv3jg3zygv3e89qn2vp48pryxsf4xpznvve5gvmy23fs89pryvf5a6ht90") + .expect("Test failed"); + let nam_transfers = RawTransfersToNamada { + transfers: vec![ + TransferToNamada { + amount: Default::default(), + asset: EthAddress([0; 20]), + receiver: address, + }; + 2 + ], + nonce: Uint::from(1), + confirmations: 0, + }; + let eth_transfers = RawTransfersToEthereum { + transfers: vec![ + TransferToEthereum { + amount: Default::default(), + asset: EthAddress([1; 20]), + receiver: EthAddress([2; 20]) + }; + 2 + ], + nonce: Uint::from(1), + confirmations: 0, + }; + let update = ValidatorSetUpdate { + nonce: Uint::from(1), + bridge_validator_hash: KeccakHash([1; 32]), + governance_validator_hash: KeccakHash([2; 32]), + }; + let changed = ChangedContract { + name: "Test".to_string(), + address: EthAddress([0; 20]), + }; + let whitelist = UpdateBridgeWhitelist { + nonce: Uint::from(1), + whitelist: vec![ + TokenWhitelist { + token: EthAddress([0; 20]), + cap: Amount::from(1000), + }; + 2 + ], + }; + assert_eq!( + RawTransfersToNamada::decode(&nam_transfers.clone().encode()) + .expect("Test failed"), + nam_transfers + ); + assert_eq!( + RawTransfersToEthereum::decode(ð_transfers.clone().encode()) + .expect("Test failed"), + eth_transfers + ); + assert_eq!( + ValidatorSetUpdate::decode(&update.clone().encode()) + .expect("Test failed"), + update + ); + assert_eq!( + ChangedContract::decode(&changed.clone().encode()) + .expect("Test failed"), + changed + ); + assert_eq!( + UpdateBridgeWhitelist::decode(&whitelist.clone().encode()) + .expect("Test failed"), + whitelist + ); + } + } +} + +pub use eth_events::*; diff --git a/apps/src/lib/node/ledger/ethereum_node/mod.rs b/apps/src/lib/node/ledger/ethereum_node/mod.rs new file mode 100644 index 0000000000..def2bbad96 --- /dev/null +++ b/apps/src/lib/node/ledger/ethereum_node/mod.rs @@ -0,0 +1,188 @@ +pub mod events; +pub mod oracle; +pub mod test_tools; +use std::ffi::OsString; + +use thiserror::Error; +use tokio::sync::oneshot::{Receiver, Sender}; + +#[derive(Error, Debug)] +pub enum Error { + #[error("Failed to start Ethereum fullnode: {0}")] + StartUp(std::io::Error), + #[error("{0}")] + Runtime(String), + #[error( + "The receiver of the Ethereum relayer messages unexpectedly dropped" + )] + RelayerReceiverDropped, + #[error("The Ethereum Oracle process unexpectedly stopped")] + Oracle, + #[error( + "Could not read Ethereum network to connect to from env var: {0:?}" + )] + EthereumNetwork(OsString), + #[error("Could not decode Ethereum event: {0}")] + Decode(String), +} + +pub type Result = std::result::Result; + +/// Monitor the Ethereum fullnode subprocess, returning only once it has +/// stopped running. If a signal is sent on `abort_recv`, the fullnode +/// subprocess will be killed. +pub async fn monitor( + mut node: eth_fullnode::EthereumNode, + abort_recv: Receiver>, +) { + tokio::select! 
{ + // wait for the Ethereum fullnode to naturally exit + exit_status = node.wait() => { + match exit_status { + Ok(exit_status) => { + if exit_status.success() { + tracing::info!(%exit_status, "Ethereum fullnode exited"); + } else { + tracing::warn!(%exit_status, "Ethereum fullnode exited with nonzero exit code"); + } + }, + Err(err) => { + tracing::warn!("Error while waiting for the Ethereum fullnode to exit: {err}"); + tracing::info!("Ensuring Ethereum fullnode is shut down..."); + node.kill().await; + }, + }; + }, + // wait for an abort signal + resp_sender = abort_recv => { + match resp_sender { + Ok(resp_sender) => { + tracing::info!("Shutting down Ethereum fullnode..."); + node.kill().await; + resp_sender.send(()).unwrap(); + }, + Err(err) => { + tracing::error!("The Ethereum abort sender has unexpectedly dropped: {}", err); + tracing::info!("Shutting down Ethereum fullnode..."); + node.kill().await; + } + } + } + } +} + +/// Tools for running a geth fullnode process +pub mod eth_fullnode { + use std::io; + use std::process::ExitStatus; + use std::time::Duration; + + use tokio::process::{Child, Command}; + use tokio::task::LocalSet; + use web30::client::Web3; + + use super::{Error, Result}; + + /// A handle to a running geth process and a channel + /// that indicates it should shut down if the oracle + /// stops. + pub struct EthereumNode { + process: Child, + } + + /// Read from environment variable which Ethereum + /// network to connect to. Defaults to mainnet if + /// no variable is set. + /// + /// Returns an error if the env var is defined but not + /// a valid unicode + fn get_eth_network() -> Result> { + match std::env::var("ETHEREUM_NETWORK") { + Ok(path) => { + tracing::info!("Connecting to Ethereum network: {}", &path); + Ok(Some(format!("--{}", path))) + } + Err(std::env::VarError::NotPresent) => { + tracing::info!("Connecting to Ethereum mainnet"); + Ok(None) + } + Err(std::env::VarError::NotUnicode(msg)) => { + Err(Error::EthereumNetwork(msg)) + } + } + } + + impl EthereumNode { + /// Starts the geth process and returns a handle to it. + /// + /// First looks up which network to connect to from an env var. + /// It then starts the process and waits for it to finish + /// syncing. + pub async fn new(url: &str) -> Result { + // we have to start the node in a [`LocalSet`] due to the web30 + // crate + LocalSet::new() + .run_until(async move { + // the geth fullnode process + let network = get_eth_network()?; + let args = match &network { + Some(network) => { + vec![ + "--syncmode", + "snap", + network.as_str(), + "--http", + ] + } + None => vec!["--syncmode", "snap", "--http"], + }; + let ethereum_node = Command::new("geth") + .args(&args) + .kill_on_drop(true) + .spawn() + .map_err(Error::StartUp)?; + tracing::info!("Ethereum fullnode started"); + + // it takes a brief amount of time to open up the websocket + // on geth's end + const CLIENT_TIMEOUT: Duration = Duration::from_secs(5); + let client = Web3::new(url, CLIENT_TIMEOUT); + + const SLEEP_DUR: Duration = Duration::from_secs(1); + tracing::info!(?url, "Checking Geth status"); + loop { + if let Ok(false) = client.eth_syncing().await { + tracing::info!(?url, "Finished syncing"); + break; + } + if let Err(error) = client.eth_syncing().await { + // This is very noisy and usually not interesting. 
+ // Still can be very useful + tracing::debug!( + ?url, + ?error, + "Couldn't check Geth sync status" + ); + } + tokio::time::sleep(SLEEP_DUR).await; + } + + let node = Self { + process: ethereum_node, + }; + Ok(node) + }) + .await + } + + /// Wait for the process to finish. + pub async fn wait(&mut self) -> io::Result { + self.process.wait().await + } + + /// Stop the geth process + pub async fn kill(&mut self) { + self.process.kill().await.unwrap(); + } + } +} diff --git a/apps/src/lib/node/ledger/ethereum_node/oracle/config.rs b/apps/src/lib/node/ledger/ethereum_node/oracle/config.rs new file mode 100644 index 0000000000..eaefa9aa84 --- /dev/null +++ b/apps/src/lib/node/ledger/ethereum_node/oracle/config.rs @@ -0,0 +1,30 @@ +//! Configuration for an oracle. +use std::num::NonZeroU64; + +use namada::types::ethereum_events::EthAddress; + +/// Configuration for an oracle. +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)] +pub struct Config { + /// The minimum number of block confirmations an Ethereum block must have + /// before it will be checked for bridge events. + pub min_confirmations: NonZeroU64, + /// The Ethereum address of the current bridge contract. + pub bridge_contract: EthAddress, + /// The Ethereum address of the current governance contract. + pub governance_contract: EthAddress, +} + +// TODO: this production Default implementation is temporary, there should be no +// default config - initialization should always be from storage. +impl std::default::Default for Config { + fn default() -> Self { + Self { + // SAFETY: we must always call NonZeroU64::new_unchecked here with a + // value that is >= 1 + min_confirmations: unsafe { NonZeroU64::new_unchecked(100) }, + bridge_contract: EthAddress([0; 20]), + governance_contract: EthAddress([1; 20]), + } + } +} diff --git a/apps/src/lib/node/ledger/ethereum_node/oracle/control.rs b/apps/src/lib/node/ledger/ethereum_node/oracle/control.rs new file mode 100644 index 0000000000..accf7d0a18 --- /dev/null +++ b/apps/src/lib/node/ledger/ethereum_node/oracle/control.rs @@ -0,0 +1,23 @@ +//! The oracle is controlled by sending commands over a channel. + +use tokio::sync::mpsc; + +use super::config::Config; + +/// Used to send commands to an oracle. +pub type Sender = mpsc::Sender; +/// Used by an oracle to receive commands. +pub type Receiver = mpsc::Receiver; + +/// Returns two sides of a [`mpsc`] channel that can be used for controlling an +/// oracle. +pub fn channel() -> (Sender, Receiver) { + mpsc::channel(1) +} + +/// Commands used to configure and control an `Oracle`. +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)] +pub enum Command { + /// Sends a configuration to the oracle for it to use. 
+    SendConfig { config: Config },
+}
diff --git a/apps/src/lib/node/ledger/ethereum_node/oracle/mod.rs b/apps/src/lib/node/ledger/ethereum_node/oracle/mod.rs
new file mode 100644
index 0000000000..b23747be0c
--- /dev/null
+++ b/apps/src/lib/node/ledger/ethereum_node/oracle/mod.rs
@@ -0,0 +1,829 @@
+pub mod config;
+pub mod control;
+
+use std::ops::Deref;
+use std::time::Duration;
+
+use clarity::Address;
+use eyre::{eyre, Result};
+use namada::types::ethereum_events::EthereumEvent;
+use num256::Uint256;
+use tokio::sync::mpsc::Sender as BoundedSender;
+use tokio::task::LocalSet;
+#[cfg(not(test))]
+use web30::client::Web3;
+
+use self::config::Config;
+use super::events::{signatures, PendingEvent};
+#[cfg(test)]
+use super::test_tools::mock_web3_client::Web3;
+
+/// The default amount of time the oracle will wait between processing blocks.
+const DEFAULT_BACKOFF: Duration = std::time::Duration::from_secs(1);
+
+/// A client that talks to geth, parsing Ethereum events relevant to Namada
+/// and relaying them to the ledger process
+pub struct Oracle {
+    /// The client that talks to the Ethereum fullnode
+    client: Web3,
+    /// A channel for sending processed and confirmed
+    /// events to the ledger process
+    sender: BoundedSender<EthereumEvent>,
+    /// How long the oracle should wait between checking blocks
+    backoff: Duration,
+    /// A channel for controlling and configuring the oracle.
+    control: control::Receiver,
+}
+
+impl Deref for Oracle {
+    type Target = Web3;
+
+    fn deref(&self) -> &Self::Target {
+        &self.client
+    }
+}
+
+impl Oracle {
+    /// Construct a new [`Oracle`]. Note that it cannot do anything until it
+    /// has been sent a configuration via the passed in `control` channel.
+    pub fn new(
+        url: &str,
+        sender: BoundedSender<EthereumEvent>,
+        backoff: Duration,
+        control: control::Receiver,
+    ) -> Self {
+        Self {
+            client: Web3::new(url, std::time::Duration::from_secs(30)),
+            sender,
+            backoff,
+            control,
+        }
+    }
+
+    /// Send a series of [`EthereumEvent`]s to the Namada
+    /// ledger. Returns a boolean indicating whether all of
+    /// them were sent successfully; if `false` is returned,
+    /// the receiver has hung up.
+    ///
+    /// N.B. this will block if the internal channel buffer
+    /// is full.
+    async fn send(&self, events: Vec<EthereumEvent>) -> bool {
+        if self.sender.is_closed() {
+            return false;
+        }
+        for event in events.into_iter() {
+            if self.sender.send(event).await.is_err() {
+                return false;
+            }
+        }
+        true
+    }
+
+    async fn sleep(&self) {
+        tokio::time::sleep(self.backoff).await;
+    }
+}
+
+/// Block until an initial configuration is received via the command channel.
+/// Returns the initial config once received, or `None` if the command channel
+/// is closed.
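// An illustrative wiring sketch: how a caller hands the oracle its event and
// control channels, then unblocks it with a first `SendConfig` command.
// `run_oracle` is the entry point defined further down in this file; the URL
// is a placeholder, and the capacity of 1000 mirrors
// ORACLE_CHANNEL_BUFFER_SIZE in apps/src/lib/node/ledger/mod.rs.
async fn spawn_configured_oracle_sketch() -> (
    tokio::sync::mpsc::Receiver<EthereumEvent>,
    tokio::task::JoinHandle<()>,
) {
    let (eth_sender, eth_receiver) = tokio::sync::mpsc::channel(1000);
    let (control_sender, control_receiver) = control::channel();
    let handle =
        run_oracle("http://localhost:8545", eth_sender, control_receiver);
    // the oracle parks in `await_initial_configuration` (below) until this
    // first command arrives
    control_sender
        .send(control::Command::SendConfig {
            config: Config::default(),
        })
        .await
        .expect("the oracle unexpectedly dropped its control receiver");
    (eth_receiver, handle)
}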
+async fn await_initial_configuration( + receiver: &mut control::Receiver, +) -> Option { + match receiver.recv().await { + Some(cmd) => match cmd { + control::Command::SendConfig { config } => Some(config), + }, + None => None, + } +} + +/// Set up an Oracle and run the process where the Oracle +/// processes and forwards Ethereum events to the ledger +pub fn run_oracle( + url: impl AsRef, + sender: BoundedSender, + control: control::Receiver, +) -> tokio::task::JoinHandle<()> { + let url = url.as_ref().to_owned(); + // we have to run the oracle in a [`LocalSet`] due to the web30 + // crate + tokio::task::spawn_blocking(move || { + let rt = tokio::runtime::Handle::current(); + rt.block_on(async move { + LocalSet::new() + .run_until(async move { + tracing::info!(?url, "Ethereum event oracle is starting"); + + let oracle = + Oracle::new(&url, sender, DEFAULT_BACKOFF, control); + run_oracle_aux(oracle).await; + + tracing::info!( + ?url, + "Ethereum event oracle is no longer running" + ); + }) + .await + }); + }) +} + +/// Given an oracle, watch for new Ethereum events, processing +/// them into Namada native types. +/// +/// It also checks that once the specified number of confirmations +/// is reached, an event is forwarded to the ledger process +async fn run_oracle_aux(mut oracle: Oracle) { + tracing::info!("Oracle is awaiting initial configuration"); + let config = match await_initial_configuration(&mut oracle.control).await { + Some(config) => { + tracing::info!(?config, "Oracle received initial configuration"); + config + } + None => { + tracing::debug!( + "Oracle control channel was closed before the oracle could be \ + configured" + ); + return; + } + }; + + // Initialize a queue to keep events which are awaiting a certain number of + // confirmations + let mut pending: Vec = Vec::new(); + + // TODO(namada#560): get the appropriate Ethereum block height to start + // checking from rather than starting from zero every time + let mut next_block_to_process: Uint256 = 0u8.into(); + + loop { + tracing::info!( + ?next_block_to_process, + "Checking Ethereum block for bridge events" + ); + tokio::select! { + result = process(&oracle, &config, &mut pending, next_block_to_process.clone()) => { + match result { + Ok(()) => next_block_to_process += 1u8.into(), + Err(error) => tracing::warn!( + ?error, + block = ?next_block_to_process, + "Error while trying to process Ethereum block" + ), + } + }, + _ = oracle.sender.closed() => { + tracing::info!( + "Ethereum oracle can not send events to the ledger; the \ + receiver has hung up. 
Shutting down" + ); + break + } + }; + oracle.sleep().await; + } +} + +/// Checks if the given block has any events relating to the bridge, and if so, +/// sends them to the oracle's `sender` channel +async fn process( + oracle: &Oracle, + config: &Config, + pending: &mut Vec, + block_to_process: Uint256, +) -> Result<()> { + // update the latest block height + let latest_block = loop { + let latest_block = match oracle.eth_block_number().await { + Ok(height) => height, + Err(error) => { + return Err(eyre!( + "Couldn't get the latest synced Ethereum block height \ + from the RPC endpoint: {error:?}", + )); + } + }; + let minimum_latest_block = block_to_process.clone() + + Uint256::from(u64::from(config.min_confirmations)); + if minimum_latest_block > latest_block { + tracing::debug!( + ?block_to_process, + ?latest_block, + ?minimum_latest_block, + "Waiting for enough Ethereum blocks to be synced" + ); + // this isn't an error condition, so we continue in the loop here + // with a back off + oracle.sleep().await; + continue; + } + break latest_block; + }; + tracing::debug!( + ?block_to_process, + ?latest_block, + "Got latest Ethereum block height" + ); + // check for events in Ethereum blocks that have reached the minimum number + // of confirmations + for sig in signatures::SIGNATURES { + let addr: Address = match signatures::SigType::from(sig) { + signatures::SigType::Bridge => config.bridge_contract.0.into(), + signatures::SigType::Governance => { + config.governance_contract.0.into() + } + }; + tracing::debug!( + ?block_to_process, + ?addr, + ?sig, + "Checking for bridge events" + ); + // fetch the events for matching the given signature + let mut events = { + let logs = match oracle + .check_for_events( + block_to_process.clone(), + Some(block_to_process.clone()), + vec![addr], + vec![sig], + ) + .await + { + Ok(logs) => logs, + Err(error) => { + return Err(eyre!( + "Couldn't check for events ({sig} from {addr}) with \ + the RPC endpoint: {error:?}", + )); + } + }; + if !logs.is_empty() { + tracing::info!( + ?block_to_process, + ?addr, + ?sig, + n_events = logs.len(), + "Found bridge events in Ethereum block" + ) + } + logs.into_iter() + .filter_map(|log| { + match PendingEvent::decode( + sig, + block_to_process.clone(), + log.data.0.as_slice(), + u64::from(config.min_confirmations).into(), + ) { + Ok(event) => Some(event), + Err(error) => { + tracing::error!( + ?error, + ?block_to_process, + ?addr, + ?sig, + "Couldn't decode event: {:#?}", + log + ); + None + } + } + }) + .collect() + }; + pending.append(&mut events); + if !pending.is_empty() { + tracing::info!( + ?block_to_process, + ?addr, + ?sig, + pending = pending.len(), + "There are Ethereum events pending" + ); + } + let confirmed = process_queue(&latest_block, pending); + if !confirmed.is_empty() { + tracing::info!( + ?block_to_process, + ?addr, + ?sig, + pending = pending.len(), + confirmed = confirmed.len(), + min_confirmations = ?config.min_confirmations, + "Some events that have reached the minimum number of \ + confirmations and will be sent onwards" + ); + } + if !oracle.send(confirmed).await { + return Err(eyre!( + "Could not send all bridge events ({sig} from {addr}) to the \ + shell" + )); + } + } + Ok(()) +} + +/// Check which events in the queue have reached their +/// required number of confirmations and remove them +/// from the queue of pending events +fn process_queue( + latest_block: &Uint256, + pending: &mut Vec, +) -> Vec { + let mut pending_tmp: Vec = Vec::with_capacity(pending.len()); + std::mem::swap(&mut 
pending_tmp, pending); + let mut confirmed = vec![]; + for item in pending_tmp.into_iter() { + if item.is_confirmed(latest_block) { + confirmed.push(item.event); + } else { + pending.push(item); + } + } + confirmed +} + +#[cfg(test)] +mod test_oracle { + use std::num::NonZeroU64; + + use namada::types::ethereum_events::{EthAddress, TransferToEthereum}; + use tokio::sync::oneshot::channel; + use tokio::time::timeout; + + use super::*; + use crate::node::ledger::ethereum_node::events::{ + ChangedContract, RawTransfersToEthereum, + }; + use crate::node::ledger::ethereum_node::test_tools::mock_web3_client::{ + MockEventType, TestCmd, Web3, + }; + + /// The data returned from setting up a test + struct TestPackage { + oracle: Oracle, + admin_channel: tokio::sync::mpsc::UnboundedSender, + eth_recv: tokio::sync::mpsc::Receiver, + control_sender: control::Sender, + blocks_processed_recv: tokio::sync::mpsc::UnboundedReceiver, + } + + /// Helper function that starts running the oracle in a new thread, and + /// initializes it with a simple default configuration that is appropriate + /// for tests. + async fn start_with_default_config( + oracle: Oracle, + control_sender: control::Sender, + config: Config, + ) -> tokio::task::JoinHandle<()> { + let handle = tokio::task::spawn_blocking(move || { + let rt = tokio::runtime::Handle::current(); + rt.block_on(async move { + LocalSet::new() + .run_until(async move { + run_oracle_aux(oracle).await; + }) + .await + }); + }); + control_sender + .send(control::Command::SendConfig { config }) + .await + .unwrap(); + handle + } + + /// Set up an oracle with a mock web3 client that we can control + fn setup() -> TestPackage { + let (admin_channel, blocks_processed_recv, client) = Web3::setup(); + let (eth_sender, eth_receiver) = tokio::sync::mpsc::channel(1000); + let (control_sender, control_receiver) = control::channel(); + TestPackage { + oracle: Oracle { + client, + sender: eth_sender, + // backoff should be short for tests so that they run faster + backoff: Duration::from_millis(5), + control: control_receiver, + }, + admin_channel, + eth_recv: eth_receiver, + control_sender, + blocks_processed_recv, + } + } + + /// Test that if the fullnode stops, the oracle + /// shuts down, even if the web3 client is unresponsive + #[tokio::test] + async fn test_shutdown() { + let TestPackage { + oracle, + eth_recv, + admin_channel, + control_sender, + .. + } = setup(); + let oracle = start_with_default_config( + oracle, + control_sender, + Config::default(), + ) + .await; + admin_channel + .send(TestCmd::Unresponsive) + .expect("Test failed"); + drop(eth_recv); + oracle.await.expect("Test failed"); + } + + /// Test that if no logs are received from the web3 + /// client, no events are sent out + #[tokio::test] + async fn test_no_logs_no_op() { + let TestPackage { + oracle, + mut eth_recv, + admin_channel, + blocks_processed_recv: _processed, + control_sender, + .. + } = setup(); + let oracle = start_with_default_config( + oracle, + control_sender, + Config::default(), + ) + .await; + admin_channel + .send(TestCmd::NewHeight(Uint256::from(150u32))) + .expect("Test failed"); + + let mut time = std::time::Duration::from_secs(1); + while time > std::time::Duration::from_millis(10) { + assert!(eth_recv.try_recv().is_err()); + time -= std::time::Duration::from_millis(10); + } + drop(eth_recv); + oracle.await.expect("Test failed"); + } + + /// Test that if a new block height doesn't increase, + /// no events are sent out even if there are + /// some in the logs. 
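// The confirmation rule that `process_queue` delegates to
// `PendingEvent::is_confirmed` (defined in the `events` module, which is not
// shown in this diff), restated as a standalone sketch. The parameter names
// here are assumptions; the comparison is the point: an event needing `n`
// confirmations becomes ripe once the latest synced block is at least `n`
// blocks past the block that included it.
fn is_confirmed_sketch(
    event_block_height: Uint256,
    required_confirmations: Uint256,
    latest_block: &Uint256,
) -> bool {
    // written as an addition on the event side, so there is no
    // `latest_block - event_block_height` subtraction to underflow
    *latest_block >= event_block_height + required_confirmations
}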
+ #[tokio::test] + async fn test_cant_get_new_height() { + let TestPackage { + oracle, + mut eth_recv, + admin_channel, + blocks_processed_recv: _processed, + control_sender, + .. + } = setup(); + let config = Config { + min_confirmations: NonZeroU64::try_from(100) + .expect("Test wasn't set up correctly"), + ..Config::default() + }; + let oracle = + start_with_default_config(oracle, control_sender, config).await; + // Increase height above the configured minimum confirmations + admin_channel + .send(TestCmd::NewHeight( + u64::from(config.min_confirmations).into(), + )) + .expect("Test failed"); + + let new_event = ChangedContract { + name: "Test".to_string(), + address: EthAddress([0; 20]), + } + .encode(); + let (sender, _) = channel(); + admin_channel + .send(TestCmd::NewEvent { + event_type: MockEventType::NewContract, + data: new_event, + height: 101, + seen: sender, + }) + .expect("Test failed"); + // since height is not updating, we should not receive events + let mut time = std::time::Duration::from_secs(1); + while time > std::time::Duration::from_millis(10) { + assert!(eth_recv.try_recv().is_err()); + time -= std::time::Duration::from_millis(10); + } + drop(eth_recv); + oracle.await.expect("Test failed"); + } + + /// Test that the oracle waits until new logs + /// are received before sending them on. + #[tokio::test] + async fn test_wait_on_new_logs() { + let TestPackage { + oracle, + eth_recv, + admin_channel, + blocks_processed_recv: _processed, + control_sender, + .. + } = setup(); + let config = Config { + min_confirmations: NonZeroU64::try_from(100) + .expect("Test wasn't set up correctly"), + ..Config::default() + }; + let oracle = + start_with_default_config(oracle, control_sender, config).await; + // Increase height above the configured minimum confirmations + admin_channel + .send(TestCmd::NewHeight( + u64::from(config.min_confirmations).into(), + )) + .expect("Test failed"); + + // set the oracle to be unresponsive + admin_channel + .send(TestCmd::Unresponsive) + .expect("Test failed"); + // send a new event to the oracle + let new_event = ChangedContract { + name: "Test".to_string(), + address: EthAddress([0; 20]), + } + .encode(); + let (sender, mut seen) = channel(); + admin_channel + .send(TestCmd::NewEvent { + event_type: MockEventType::NewContract, + data: new_event, + height: 150, + seen: sender, + }) + .expect("Test failed"); + // set the height high enough to emit the event + admin_channel + .send(TestCmd::NewHeight(Uint256::from(251u32))) + .expect("Test failed"); + + // the event should not be emitted even though the height is large + // enough + let mut time = std::time::Duration::from_secs(1); + while time > std::time::Duration::from_millis(10) { + assert!(seen.try_recv().is_err()); + time -= std::time::Duration::from_millis(10); + } + // check that when web3 becomes responsive, oracle sends event + admin_channel.send(TestCmd::Normal).expect("Test failed"); + seen.await.expect("Test failed"); + drop(eth_recv); + oracle.await.expect("Test failed"); + } + + /// Test that events are only sent when they + /// reach the required number of confirmations + #[tokio::test] + async fn test_finality_gadget() { + let TestPackage { + oracle, + mut eth_recv, + admin_channel, + blocks_processed_recv: _processed, + control_sender, + .. 
+ } = setup(); + let config = Config { + min_confirmations: NonZeroU64::try_from(100) + .expect("Test wasn't set up correctly"), + ..Config::default() + }; + let oracle = + start_with_default_config(oracle, control_sender, config).await; + // Increase height above the configured minimum confirmations + admin_channel + .send(TestCmd::NewHeight( + u64::from(config.min_confirmations).into(), + )) + .expect("Test failed"); + + // confirmed after 100 blocks + let first_event = ChangedContract { + name: "Test".to_string(), + address: EthAddress([0; 20]), + } + .encode(); + + // confirmed after 125 blocks + let second_event = RawTransfersToEthereum { + transfers: vec![TransferToEthereum { + amount: Default::default(), + asset: EthAddress([0; 20]), + receiver: EthAddress([1; 20]), + }], + nonce: 1.into(), + confirmations: 125, + } + .encode(); + + // send in the events to the logs + let (sender, seen_second) = channel(); + admin_channel + .send(TestCmd::NewEvent { + event_type: MockEventType::TransferToEthereum, + data: second_event, + height: 125, + seen: sender, + }) + .expect("Test failed"); + let (sender, _recv) = channel(); + admin_channel + .send(TestCmd::NewEvent { + event_type: MockEventType::NewContract, + data: first_event, + height: 100, + seen: sender, + }) + .expect("Test failed"); + + // increase block height so first event is confirmed but second is + // not. + admin_channel + .send(TestCmd::NewHeight(Uint256::from(200u32))) + .expect("Test failed"); + // check the correct event is received + let event = eth_recv.recv().await.expect("Test failed"); + if let EthereumEvent::NewContract { name, address } = event { + assert_eq!(name.as_str(), "Test"); + assert_eq!(address, EthAddress([0; 20])); + } else { + panic!("Test failed, {:?}", event); + } + + // check no other events are received + let mut time = std::time::Duration::from_secs(1); + while time > std::time::Duration::from_millis(10) { + assert!(eth_recv.try_recv().is_err()); + time -= std::time::Duration::from_millis(10); + } + + // increase block height so second event is emitted + admin_channel + .send(TestCmd::NewHeight(Uint256::from(225u32))) + .expect("Test failed"); + // wait until event is emitted + seen_second.await.expect("Test failed"); + // increase block height so second event is confirmed + admin_channel + .send(TestCmd::NewHeight(Uint256::from(250u32))) + .expect("Test failed"); + // check correct event is received + let event = eth_recv.recv().await.expect("Test failed"); + if let EthereumEvent::TransfersToEthereum { mut transfers, .. } = event + { + assert_eq!(transfers.len(), 1); + let transfer = transfers.remove(0); + assert_eq!( + transfer, + TransferToEthereum { + amount: Default::default(), + asset: EthAddress([0; 20]), + receiver: EthAddress([1; 20]), + } + ); + } else { + panic!("Test failed"); + } + + drop(eth_recv); + oracle.await.expect("Test failed"); + } + + /// Test that Ethereum blocks are processed in sequence up to the latest + /// block that has reached the minimum number of confirmations + #[tokio::test] + async fn test_blocks_checked_sequence() { + let TestPackage { + oracle, + eth_recv, + admin_channel, + mut blocks_processed_recv, + control_sender, + .. 
+ } = setup(); + let config = Config::default(); + let oracle = + start_with_default_config(oracle, control_sender, config).await; + + // set the height of the chain such that there are some blocks deep + // enough to be considered confirmed by the oracle + let confirmed_block_height = 9; // all blocks up to and including this block have enough confirmations + let synced_block_height = + u64::from(config.min_confirmations) + confirmed_block_height; + for height in 0..synced_block_height + 1 { + admin_channel + .send(TestCmd::NewHeight(Uint256::from(height))) + .expect("Test failed"); + } + // check that the oracle indeed processes the confirmed blocks + for height in 0u64..confirmed_block_height + 1 { + let block_processed = + timeout(Duration::from_secs(3), blocks_processed_recv.recv()) + .await + .expect("Timed out waiting for block to be checked") + .unwrap(); + assert_eq!(block_processed, Uint256::from(height)); + } + + // check that the oracle hasn't yet checked any further blocks + // TODO: check this in a deterministic way rather than just waiting a + // bit + assert!( + timeout(Duration::from_secs(1), blocks_processed_recv.recv()) + .await + .is_err() + ); + + // increase the height of the chain by one, and check that the oracle + // processed the next confirmed block + let synced_block_height = synced_block_height + 1; + admin_channel + .send(TestCmd::NewHeight(Uint256::from(synced_block_height))) + .expect("Test failed"); + + let block_processed = + timeout(Duration::from_secs(3), blocks_processed_recv.recv()) + .await + .expect("Timed out waiting for block to be checked") + .unwrap(); + assert_eq!(block_processed, Uint256::from(confirmed_block_height + 1)); + + drop(eth_recv); + oracle.await.expect("Test failed"); + } + + /// Test that if the Ethereum RPC endpoint returns a latest block that is + /// more than one block later than the previous latest block we received, we + /// still check all the blocks in between + #[tokio::test] + async fn test_all_blocks_checked() { + let TestPackage { + oracle, + eth_recv, + admin_channel, + mut blocks_processed_recv, + control_sender, + .. 
+        } = setup();
+        let config = Config::default();
+        let oracle =
+            start_with_default_config(oracle, control_sender, config).await;
+
+        let confirmed_block_height = 9; // all blocks up to and including this block have enough confirmations
+        let synced_block_height =
+            u64::from(config.min_confirmations) + confirmed_block_height;
+        admin_channel
+            .send(TestCmd::NewHeight(Uint256::from(synced_block_height)))
+            .expect("Test failed");
+
+        // check that the oracle has indeed processed the first `n` blocks,
+        // even though the first latest block that the oracle received was
+        // not 0
+        for height in 0u64..confirmed_block_height + 1 {
+            let block_processed =
+                timeout(Duration::from_secs(3), blocks_processed_recv.recv())
+                    .await
+                    .expect("Timed out waiting for block to be checked")
+                    .unwrap();
+            assert_eq!(block_processed, Uint256::from(height));
+        }
+
+        // the next time the oracle checks, the latest block will have
+        // increased by more than one
+        let difference = 10;
+        let synced_block_height = synced_block_height + difference;
+        admin_channel
+            .send(TestCmd::NewHeight(Uint256::from(synced_block_height)))
+            .expect("Test failed");
+
+        // check that the oracle still checks the blocks in between
+        for height in (confirmed_block_height + 1)
+            ..(confirmed_block_height + difference + 1)
+        {
+            let block_processed =
+                timeout(Duration::from_secs(3), blocks_processed_recv.recv())
+                    .await
+                    .expect("Timed out waiting for block to be checked")
+                    .unwrap();
+            assert_eq!(block_processed, Uint256::from(height));
+        }
+
+        drop(eth_recv);
+        oracle.await.expect("Test failed");
+    }
+}
diff --git a/apps/src/lib/node/ledger/ethereum_node/test_tools/events_endpoint.rs b/apps/src/lib/node/ledger/ethereum_node/test_tools/events_endpoint.rs
new file mode 100644
index 0000000000..0bf9c3a7dd
--- /dev/null
+++ b/apps/src/lib/node/ledger/ethereum_node/test_tools/events_endpoint.rs
@@ -0,0 +1,85 @@
+use borsh::BorshDeserialize;
+use namada::types::ethereum_events::EthereumEvent;
+use tokio::sync::mpsc::Sender as BoundedSender;
+use tokio::sync::oneshot::{Receiver, Sender};
+use warp::reply::WithStatus;
+use warp::Filter;
+
+/// The default IP address and port on which the events endpoint will listen.
+const DEFAULT_LISTEN_ADDR: ([u8; 4], u16) = ([0, 0, 0, 0], 3030);
+
+/// The endpoint to which Borsh-serialized Ethereum events should be sent,
+/// via an HTTP POST request.
+const EVENTS_POST_ENDPOINT: &str = "eth_events";
+
+/// Starts a [`warp::Server`] that listens for Borsh-serialized Ethereum events
+/// and then forwards them to `sender`. It shuts down if a signal is sent on the
+/// `abort_recv` channel.
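// A client-side sketch of exercising this endpoint. The `reqwest` crate is
// an assumption here (it is not a dependency introduced by this patch);
// `try_to_vec` is borsh's standard serialization entry point, and the
// address and path come from DEFAULT_LISTEN_ADDR and EVENTS_POST_ENDPOINT
// above.
async fn post_event_sketch(
    event: &EthereumEvent,
) -> Result<(), Box<dyn std::error::Error>> {
    use borsh::BorshSerialize;
    // Borsh-encode the event; this is exactly what `send` below decodes
    let body = event.try_to_vec()?;
    let resp = reqwest::Client::new()
        .post("http://0.0.0.0:3030/eth_events")
        .body(body)
        .send()
        .await?;
    // `send` replies 200 on success, 400 for an undecodable event, and 500
    // if the shell side of the channel has hung up
    assert!(resp.status().is_success());
    Ok(())
}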
+pub async fn serve( + sender: BoundedSender, + abort_recv: Receiver>, +) { + tracing::info!(?DEFAULT_LISTEN_ADDR, "Ethereum event endpoint is starting"); + let eth_events = warp::post() + .and(warp::path(EVENTS_POST_ENDPOINT)) + .and(warp::body::bytes()) + .then(move |bytes: bytes::Bytes| send(bytes, sender.clone())); + + let (_, future) = warp::serve(eth_events).bind_with_graceful_shutdown( + DEFAULT_LISTEN_ADDR, + async move { + tracing::info!( + ?DEFAULT_LISTEN_ADDR, + "Starting to listen for Borsh-serialized Ethereum events" + ); + match abort_recv.await { + Ok(abort_resp_send) => { + if abort_resp_send.send(()).is_err() { + tracing::warn!( + "Received signal to abort but failed to respond, \ + will abort now" + ) + } + } + Err(_) => tracing::warn!( + "Channel for receiving signal to abort was closed \ + abruptly, will abort now" + ), + }; + tracing::info!( + ?DEFAULT_LISTEN_ADDR, + "Stopping listening for Borsh-serialized Ethereum events" + ); + }, + ); + future.await +} + +/// Callback to send out events from the oracle +async fn send( + bytes: bytes::Bytes, + sender: BoundedSender, +) -> WithStatus<&'static str> { + tracing::info!(len = bytes.len(), "Received request"); + let event = match EthereumEvent::try_from_slice(&bytes) { + Ok(event) => event, + Err(error) => { + tracing::warn!(?error, "Couldn't handle request"); + return warp::reply::with_status( + "Bad request", + warp::http::StatusCode::BAD_REQUEST, + ); + } + }; + tracing::debug!("Serialized event - {:#?}", event); + match sender.send(event).await { + Ok(()) => warp::reply::with_status("OK", warp::http::StatusCode::OK), + Err(error) => { + tracing::warn!(?error, "Couldn't send event"); + warp::reply::with_status( + "Internal server error", + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + ) + } + } +} diff --git a/apps/src/lib/node/ledger/ethereum_node/test_tools/mod.rs b/apps/src/lib/node/ledger/ethereum_node/test_tools/mod.rs new file mode 100644 index 0000000000..6a045754c6 --- /dev/null +++ b/apps/src/lib/node/ledger/ethereum_node/test_tools/mod.rs @@ -0,0 +1,180 @@ +pub mod events_endpoint; + +#[cfg(test)] +pub mod mock_web3_client { + use std::cell::RefCell; + use std::fmt::Debug; + + use num256::Uint256; + use tokio::sync::mpsc::{ + unbounded_channel, UnboundedReceiver, UnboundedSender, + }; + use tokio::sync::oneshot::Sender; + use web30::types::Log; + + use super::super::events::signatures::*; + use super::super::{Error, Result}; + + /// Commands we can send to the mock client + #[derive(Debug)] + pub enum TestCmd { + Normal, + Unresponsive, + NewHeight(Uint256), + NewEvent { + event_type: MockEventType, + data: Vec, + height: u32, + seen: Sender<()>, + }, + } + + /// The type of events supported + #[derive(Debug, PartialEq)] + pub enum MockEventType { + TransferToNamada, + TransferToEthereum, + ValSetUpdate, + NewContract, + UpgradedContract, + BridgeWhitelist, + } + + /// A pointer to a mock Web3 client. The + /// reason is for interior mutability. + pub struct Web3(RefCell); + + /// A mock of a web3 api client connected to an ethereum fullnode. + /// It is not connected to a full node and is fully controllable + /// via a channel to allow us to mock different behavior for + /// testing purposes. 
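// Why the `RefCell` in the wrapper above: the real `web30::client::Web3`
// exposes its API through `&self` (e.g. `eth_block_number(&self)`), so the
// mock must mutate its fake state behind a shared reference too. The same
// pattern in miniature, with invented names:
struct MockSketch(std::cell::RefCell<u64>);

impl MockSketch {
    // mirrors the real client's `&self` signatures while still mutating
    // the fake chain height underneath
    fn bump_height(&self) -> u64 {
        let mut height = self.0.borrow_mut();
        *height += 1;
        *height
    }
}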
+    pub struct Web3Client {
+        cmd_channel: UnboundedReceiver<TestCmd>,
+        active: bool,
+        latest_block_height: Uint256,
+        events: Vec<(MockEventType, Vec<u8>, u32, Sender<()>)>,
+        blocks_processed: UnboundedSender<Uint256>,
+        last_block_processed: Option<Uint256>,
+    }
+
+    impl Web3 {
+        /// This method is part of the Web3 API we use,
+        /// but is not meant to be used in tests
+        #[allow(dead_code)]
+        pub fn new(_: &str, _: std::time::Duration) -> Self {
+            panic!(
+                "Method is here for api completeness. It is not meant to be \
+                 used in tests."
+            )
+        }
+
+        /// Return a new client and a separate sender
+        /// to send in admin commands
+        pub fn setup() -> (
+            UnboundedSender<TestCmd>,
+            UnboundedReceiver<Uint256>,
+            Self,
+        ) {
+            // we can only send one command at a time.
+            let (cmd_sender, cmd_channel) = unbounded_channel();
+            let (block_processed_send, block_processed_recv) =
+                unbounded_channel();
+            (
+                cmd_sender,
+                block_processed_recv,
+                Self(RefCell::new(Web3Client {
+                    cmd_channel,
+                    active: true,
+                    latest_block_height: Default::default(),
+                    events: vec![],
+                    blocks_processed: block_processed_send,
+                    last_block_processed: None,
+                })),
+            )
+        }
+
+        /// Check and apply new incoming commands
+        fn check_cmd_channel(&self) {
+            let cmd =
+                if let Ok(cmd) = self.0.borrow_mut().cmd_channel.try_recv() {
+                    cmd
+                } else {
+                    return;
+                };
+            match cmd {
+                TestCmd::Normal => self.0.borrow_mut().active = true,
+                TestCmd::Unresponsive => self.0.borrow_mut().active = false,
+                TestCmd::NewHeight(height) => {
+                    self.0.borrow_mut().latest_block_height = height
+                }
+                TestCmd::NewEvent {
+                    event_type: ty,
+                    data,
+                    height,
+                    seen,
+                } => self.0.borrow_mut().events.push((ty, data, height, seen)),
+            }
+        }
+
+        /// Gets the latest block number sent in from the
+        /// command channel.
+        pub async fn eth_block_number(&self) -> Result<Uint256> {
+            self.check_cmd_channel();
+            Ok(self.0.borrow().latest_block_height.clone())
+        }
+
+        /// Gets the events (for the appropriate signature) that
+        /// have been added from the command channel, unless the
+        /// client has been set to act unresponsive.
+ pub async fn check_for_events( + &self, + block_to_check: Uint256, + _: Option, + _: impl Debug, + mut events: Vec<&str>, + ) -> Result> { + self.check_cmd_channel(); + if self.0.borrow().active { + let ty = match events.remove(0) { + TRANSFER_TO_NAMADA_SIG => MockEventType::TransferToNamada, + TRANSFER_TO_ETHEREUM_SIG => { + MockEventType::TransferToEthereum + } + VALIDATOR_SET_UPDATE_SIG => MockEventType::ValSetUpdate, + NEW_CONTRACT_SIG => MockEventType::NewContract, + UPGRADED_CONTRACT_SIG => MockEventType::UpgradedContract, + UPDATE_BRIDGE_WHITELIST_SIG => { + MockEventType::BridgeWhitelist + } + _ => return Ok(vec![]), + }; + let mut logs = vec![]; + let mut events = vec![]; + let mut client = self.0.borrow_mut(); + std::mem::swap(&mut client.events, &mut events); + for (event_ty, data, height, seen) in events.into_iter() { + if event_ty == ty && block_to_check >= Uint256::from(height) + { + seen.send(()).unwrap(); + logs.push(Log { + data: data.into(), + ..Default::default() + }); + } else { + client.events.push((event_ty, data, height, seen)); + } + } + if client.last_block_processed.as_ref() < Some(&block_to_check) + { + client + .blocks_processed + .send(block_to_check.clone()) + .unwrap(); + client.last_block_processed = Some(block_to_check); + } + Ok(logs) + } else { + Err(Error::Runtime("Uh oh, I'm not responding".into())) + } + } + } +} diff --git a/apps/src/lib/node/ledger/mod.rs b/apps/src/lib/node/ledger/mod.rs index ca31310b74..f6eb4c5f5b 100644 --- a/apps/src/lib/node/ledger/mod.rs +++ b/apps/src/lib/node/ledger/mod.rs @@ -1,5 +1,6 @@ mod abortable; mod broadcaster; +mod ethereum_node; mod shell; mod shims; pub mod storage; @@ -14,20 +15,24 @@ use std::thread; use byte_unit::Byte; use futures::future::TryFutureExt; use namada::ledger::governance::storage as gov_storage; +use namada::types::ethereum_events::EthereumEvent; use namada::types::storage::Key; use once_cell::unsync::Lazy; use sysinfo::{RefreshKind, System, SystemExt}; +use tokio::sync::mpsc; use tokio::task; use tower::ServiceBuilder; use self::abortable::AbortableSpawner; +use self::ethereum_node::eth_fullnode; use self::shims::abcipp_shim::AbciService; use crate::config::utils::num_of_threads; -use crate::config::TendermintMode; +use crate::config::{ethereum_bridge, TendermintMode}; use crate::facade::tendermint_proto::abci::CheckTxType; use crate::facade::tower_abci::{response, split, Server}; use crate::node::ledger::broadcaster::Broadcaster; use crate::node::ledger::config::genesis; +use crate::node::ledger::ethereum_node::oracle; use crate::node::ledger::shell::{Error, MempoolTxType, Shell}; use crate::node::ledger::shims::abcipp_shim::AbcippShim; use crate::node::ledger::shims::abcipp_shim_types::shim::{Request, Response}; @@ -39,6 +44,10 @@ const ENV_VAR_TOKIO_THREADS: &str = "NAMADA_TOKIO_THREADS"; /// Env. var to set a number of Rayon global worker threads const ENV_VAR_RAYON_THREADS: &str = "NAMADA_RAYON_THREADS"; +/// The maximum number of Ethereum events the channel between +/// the oracle and the shell can hold. +const ORACLE_CHANNEL_BUFFER_SIZE: usize = 1000; + // Until ABCI++ is ready, the shim provides the service implementation. // We will add this part back in once the shim is no longer needed. //``` @@ -205,6 +214,9 @@ pub fn reset(config: config::Ledger) -> Result<(), shell::Error> { /// - A shell which contains an ABCI server, for talking to the Tendermint /// node. /// - A [`Broadcaster`], for the ledger to submit txs to Tendermint's mempool. +/// - An Ethereum full node. 
+/// - An oracle, to receive events from the Ethereum full node, and forward +/// them to the ledger. /// /// All must be alive for correct functioning. async fn run_aux(config: config::Ledger, wasm_dir: PathBuf) { @@ -217,10 +229,32 @@ async fn run_aux(config: config::Ledger, wasm_dir: PathBuf) { // Start Tendermint node let tendermint_node = start_tendermint(&mut spawner, &config); + // Start managed Ethereum node if necessary + let eth_node = maybe_start_geth(&mut spawner, &config).await; + + // Start oracle if necessary + let (eth_receiver, oracle) = + match maybe_start_ethereum_oracle(&mut spawner, &config).await { + EthereumOracleTask::NotEnabled { + handle, + eth_receiver, + } + | EthereumOracleTask::Oracle { + handle, + eth_receiver, + .. + } + | EthereumOracleTask::EventsEndpoint { + handle, + eth_receiver, + } => (Some(eth_receiver), handle), + }; + // Start ABCI server and broadcaster (the latter only if we are a validator // node) let (abci, broadcaster, shell_handler) = start_abci_broadcaster_shell( &mut spawner, + eth_receiver, wasm_dir, setup_data, config, @@ -230,10 +264,11 @@ async fn run_aux(config: config::Ledger, wasm_dir: PathBuf) { let aborted = spawner.wait_for_abort().await.child_terminated(); // Wait for all managed tasks to finish. - let res = tokio::try_join!(tendermint_node, abci, broadcaster); + let res = + tokio::try_join!(tendermint_node, eth_node, abci, oracle, broadcaster); match res { - Ok((tendermint_res, abci_res, _)) => { + Ok((tendermint_res, _, abci_res, _, _)) => { // we ignore errors on user-initiated shutdown if aborted { if let Err(err) = tendermint_res { @@ -362,14 +397,12 @@ async fn run_aux_setup( } } -/// Launches two tasks into the asynchronous runtime: -/// -/// 1. An ABCI server. -/// 2. A service for broadcasting transactions via an HTTP client. -/// -/// Lastly, this function executes an ABCI shell on a new OS thread. +/// This function spawns an ABCI server and a [`Broadcaster`] into the +/// asynchronous runtime. Additionally, it executes a shell in +/// a new OS thread, to drive the ABCI server. fn start_abci_broadcaster_shell( spawner: &mut AbortableSpawner, + eth_receiver: Option>, wasm_dir: PathBuf, setup_data: RunAuxSetup, config: config::Ledger, @@ -387,8 +420,7 @@ fn start_abci_broadcaster_shell( // Channels for validators to send protocol txs to be broadcast to the // broadcaster service - let (broadcaster_sender, broadcaster_receiver) = - tokio::sync::mpsc::unbounded_channel(); + let (broadcaster_sender, broadcaster_receiver) = mpsc::unbounded_channel(); // Start broadcaster let broadcaster = if matches!( @@ -432,6 +464,7 @@ fn start_abci_broadcaster_shell( config, wasm_dir, broadcaster_sender, + eth_receiver, &db_cache, vp_wasm_compilation_cache, tx_wasm_compilation_cache, @@ -463,8 +496,11 @@ fn start_abci_broadcaster_shell( TendermintMode::Validator => { tracing::info!("This node is a validator"); } - TendermintMode::Full | TendermintMode::Seed => { - tracing::info!("This node is not a validator"); + TendermintMode::Full => { + tracing::info!("This node is a fullnode"); + } + TendermintMode::Seed => { + tracing::info!("This node is a seednode"); } } shell.run() @@ -587,6 +623,169 @@ fn start_tendermint( }) } +/// Represents an Ethereum oracle task and associated channels. 
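// Note that the oracle-to-shell pipe created in `maybe_start_ethereum_oracle`
// below is a *bounded* channel of ORACLE_CHANNEL_BUFFER_SIZE events, so a
// shell that stops draining it eventually exerts backpressure on the oracle
// (`Oracle::send` documents that it blocks when the buffer is full). A
// self-contained sketch of that behaviour, with a capacity of 1 for
// illustration:
async fn backpressure_sketch() {
    let (tx, mut rx) = tokio::sync::mpsc::channel::<u32>(1);
    tx.send(1).await.unwrap(); // the single buffer slot is now full
    // this second send cannot complete yet; it only resolves once the
    // receiver frees a slot
    let blocked_send = tx.send(2);
    assert_eq!(rx.recv().await, Some(1)); // drain one event...
    blocked_send.await.unwrap(); // ...letting the pending send through
}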
+enum EthereumOracleTask {
+    NotEnabled {
+        // TODO(namada#459): we have to return a dummy handle for the moment,
+        // until `run_aux` is refactored
+        handle: task::JoinHandle<()>,
+        // TODO(namada#521): we have to pass back a dummy channel here
+        // unfortunately, as validator shells still expect one even in the case
+        // where Ethereum bridge componentry is not enabled
+        eth_receiver: mpsc::Receiver<EthereumEvent>,
+    },
+    Oracle {
+        handle: task::JoinHandle<()>,
+        eth_receiver: mpsc::Receiver<EthereumEvent>,
+        // TODO(namada#686): will be used by the Shell
+        _control_sender: oracle::control::Sender,
+    },
+    EventsEndpoint {
+        handle: task::JoinHandle<()>,
+        eth_receiver: mpsc::Receiver<EthereumEvent>,
+    },
+}
+
+/// Potentially starts an Ethereum event oracle.
+async fn maybe_start_ethereum_oracle(
+    spawner: &mut AbortableSpawner,
+    config: &config::Ledger,
+) -> EthereumOracleTask {
+    let ethereum_url = config.ethereum_bridge.oracle_rpc_endpoint.clone();
+
+    // Start the oracle for listening to Ethereum events
+    let (eth_sender, eth_receiver) = mpsc::channel(ORACLE_CHANNEL_BUFFER_SIZE);
+
+    match config.ethereum_bridge.mode {
+        ethereum_bridge::ledger::Mode::Managed
+        | ethereum_bridge::ledger::Mode::Remote => {
+            let (control_sender, control_receiver) = oracle::control::channel();
+            let handle = ethereum_node::oracle::run_oracle(
+                ethereum_url,
+                eth_sender,
+                control_receiver,
+            );
+
+            // TODO(namada#686): pass `oracle_control_sender` to the shell for
+            // initialization from storage, rather than using a
+            // hardcoded config
+            control_sender
+                .send(oracle::control::Command::SendConfig {
+                    config: oracle::config::Config::default(),
+                })
+                .await
+                .expect("Could not send initial configuration to the oracle!");
+            EthereumOracleTask::Oracle {
+                handle,
+                eth_receiver,
+                _control_sender: control_sender,
+            }
+        }
+        ethereum_bridge::ledger::Mode::EventsEndpoint => {
+            let (oracle_abort_send, oracle_abort_recv) =
+                tokio::sync::oneshot::channel::<tokio::sync::oneshot::Sender<()>>();
+            let handle = spawner
+                .spawn_abortable(
+                    "Ethereum Events Endpoint",
+                    move |aborter| async move {
+                        ethereum_node::test_tools::events_endpoint::serve(
+                            eth_sender,
+                            oracle_abort_recv,
+                        )
+                        .await;
+                        tracing::info!(
+                            "Ethereum events endpoint is no longer running."
+                        );
+
+                        drop(aborter);
+                    },
+                )
+                .with_cleanup(async move {
+                    let (oracle_abort_resp_send, oracle_abort_resp_recv) =
+                        tokio::sync::oneshot::channel::<()>();
+
+                    if let Ok(()) =
+                        oracle_abort_send.send(oracle_abort_resp_send)
+                    {
+                        match oracle_abort_resp_recv.await {
+                            Ok(()) => {}
+                            Err(err) => {
+                                tracing::error!(
+                                    "Failed to receive an abort response from \
+                                     the Ethereum events endpoint task: {}",
+                                    err
+                                );
+                            }
+                        }
+                    }
+                });
+            EthereumOracleTask::EventsEndpoint {
+                handle,
+                eth_receiver,
+            }
+        }
+        ethereum_bridge::ledger::Mode::Off => EthereumOracleTask::NotEnabled {
+            handle: spawn_dummy_task(()),
+            eth_receiver,
+        },
+    }
+}
+
+/// Launches a new task managing a `geth` process into the asynchronous
+/// runtime, and returns its [`task::JoinHandle`].
+///
+/// If this node is not a validator running with a managed Ethereum bridge, a
+/// dummy task that resolves immediately is spawned instead (the
+/// abort-handshake sketch below shows the shutdown protocol used for the
+/// real task).
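// The shutdown handshake used for the events endpoint above, and for geth
// below, in isolation: the cleanup side sends a oneshot *reply* sender
// through the abort channel, and the task acknowledges on it before exiting,
// so shutdown is confirmed rather than fire-and-forget. A self-contained
// sketch:
async fn abort_handshake_sketch() {
    use tokio::sync::oneshot;
    let (abort_send, abort_recv) = oneshot::channel::<oneshot::Sender<()>>();
    let task = tokio::spawn(async move {
        // ... the task runs until told to stop ...
        if let Ok(reply) = abort_recv.await {
            let _ = reply.send(()); // acknowledge the abort request
        }
    });
    // cleanup side: request shutdown, then wait for the acknowledgement
    let (reply_send, reply_recv) = oneshot::channel::<()>();
    if abort_send.send(reply_send).is_ok() {
        let _ = reply_recv.await;
    }
    task.await.unwrap();
}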
+async fn maybe_start_geth( + spawner: &mut AbortableSpawner, + config: &config::Ledger, +) -> task::JoinHandle<()> { + if !matches!(config.tendermint.tendermint_mode, TendermintMode::Validator) + || !matches!( + config.ethereum_bridge.mode, + ethereum_bridge::ledger::Mode::Managed + ) + { + return spawn_dummy_task(()); + } + + let ethereum_url = config.ethereum_bridge.oracle_rpc_endpoint.clone(); + + // Boot up geth and wait for it to finish syncing + let eth_node = eth_fullnode::EthereumNode::new(ðereum_url) + .await + .expect("Unable to start the Ethereum fullnode"); + + // Run geth in the background + let (eth_abort_send, eth_abort_recv) = + tokio::sync::oneshot::channel::>(); + let eth_node = spawner + .spawn_abortable("Ethereum", move |aborter| async move { + ethereum_node::monitor(eth_node, eth_abort_recv).await; + tracing::info!("Ethereum fullnode is no longer running."); + + drop(aborter); + }) + .with_cleanup(async move { + let (eth_abort_resp_send, eth_abort_resp_recv) = + tokio::sync::oneshot::channel::<()>(); + + if let Ok(()) = eth_abort_send.send(eth_abort_resp_send) { + match eth_abort_resp_recv.await { + Ok(()) => {} + Err(err) => { + tracing::error!( + "Failed to receive a response from Ethereum: {}", + err + ); + } + } + } + }); + eth_node +} + /// Spawn a dummy asynchronous task into the runtime, /// which will resolve instantly. fn spawn_dummy_task(ready: T) -> task::JoinHandle { diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc.rs b/apps/src/lib/node/ledger/shell/block_space_alloc.rs new file mode 100644 index 0000000000..d3166ab8aa --- /dev/null +++ b/apps/src/lib/node/ledger/shell/block_space_alloc.rs @@ -0,0 +1,511 @@ +//! Primitives that facilitate keeping track of the number +//! of bytes utilized by some Tendermint consensus round's proposal. +//! +//! This is important, because Tendermint places an upper bound +//! on the size of a block, rejecting blocks whose size exceeds +//! the limit stated in [`RequestPrepareProposal`]. +//! +//! The code in this module doesn't perform any deserializing to +//! verify if we are, in fact, allocating space for the correct +//! kind of tx for the current [`BlockSpaceAllocator`] state. It +//! is up to `PrepareProposal` to dispatch the correct kind of tx +//! into the current state of the allocator. +//! +//! # How space is allocated +//! +//! In the current implementation, we allocate space for transactions +//! in the following order of preference: +//! +//! - First, we allot space for DKG decrypted txs. Decrypted txs take up as much +//! space as needed. We will see, shortly, why in practice this is fine. +//! - Next, we allot space for protocol txs. Protocol txs get half of the +//! remaining block space allotted to them. +//! - Finally, we allot space for DKG encrypted txs. We allow DKG encrypted txs +//! to take up at most 1/3 of the total block space. +//! - If any space remains, we try to fit any leftover protocol txs in the +//! block. +//! +//! Since at some fixed height `H` decrypted txs only take up as +//! much space as the encrypted txs from height `H - 1`, and we +//! restrict the space of encrypted txs to at most 1/3 of the +//! total block space, we roughly divide the Tendermint block +//! space in 3, for each major type of tx. + +pub mod states; + +// TODO: what if a tx has a size greater than the threshold for +// its bin? how do we handle this? if we keep it in the mempool +// forever, it'll be a DoS vec, as we can make nodes run out of +// memory! 
maybe we should allow block decisions for txs that are +// too big to fit in their respective bin? in these special block +// decisions, we would only decide proposals with "large" txs?? +// +// MAYBE: in the state machine impl, reset to beginning state, and +// and alloc space for large tx right at the start. the problem with +// this is that then we may not have enough space for decrypted txs + +// TODO: panic if we don't have enough space reserved for a +// decrypted tx; in theory, we should always have enough space +// reserved for decrypted txs, given the invariants of the state +// machine + +// TODO: refactor our measure of space to also reflect gas costs. +// the total gas of all chosen txs cannot exceed the configured max +// gas per block, otherwise a proposal will be rejected! + +use std::marker::PhantomData; + +use namada::core::ledger::storage::{self, Storage}; +use namada::ledger::pos::PosQueries; + +#[allow(unused_imports)] +use crate::facade::tendermint_proto::abci::RequestPrepareProposal; + +/// Block space allocation failure status responses. +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub enum AllocFailure { + /// The transaction can only be included in an upcoming block. + /// + /// We return the space left in the tx bin for logging purposes. + Rejected { bin_space_left: u64 }, + /// The transaction would overflow the allotted bin space, + /// therefore it needs to be handled separately. + /// + /// We return the size of the tx bin for logging purposes. + OverflowsBin { bin_size: u64 }, +} + +/// Allotted space for a batch of transactions in some proposed block, +/// measured in bytes. +/// +/// We keep track of the current space utilized by: +/// +/// - Protocol transactions. +/// - DKG decrypted transactions. +/// - DKG encrypted transactions. +#[derive(Debug, Default)] +pub struct BlockSpaceAllocator { + /// The current state of the [`BlockSpaceAllocator`] state machine. + _state: PhantomData<*const State>, + /// The total space Tendermint has allotted to the + /// application for the current block height. + block: TxBin, + /// The current space utilized by protocol transactions. + protocol_txs: TxBin, + /// The current space utilized by DKG encrypted transactions. + encrypted_txs: TxBin, + /// The current space utilized by DKG decrypted transactions. + decrypted_txs: TxBin, +} + +impl From<&Storage> + for BlockSpaceAllocator +where + D: storage::DB + for<'iter> storage::DBIter<'iter>, + H: storage::StorageHasher, +{ + #[inline] + fn from(storage: &Storage) -> Self { + Self::init(storage.get_max_proposal_bytes().get()) + } +} + +impl BlockSpaceAllocator { + /// Construct a new [`BlockSpaceAllocator`], with an upper bound + /// on the max size of all txs in a block defined by Tendermint. + #[inline] + pub fn init(tendermint_max_block_space_in_bytes: u64) -> Self { + let max = tendermint_max_block_space_in_bytes; + Self { + _state: PhantomData, + block: TxBin::init(max), + protocol_txs: TxBin::default(), + encrypted_txs: TxBin::default(), + // decrypted txs can use as much space as needed; in practice, + // we'll only need, at most, the amount of space reserved for + // encrypted txs at the prev block height + decrypted_txs: TxBin::init(max), + } + } +} + +impl BlockSpaceAllocator { + /// Return the amount of space left to initialize in all + /// [`TxBin`] instances. 
+ /// + /// This is calculated based on the difference between the Tendermint + /// block space for a given round and the sum of the allotted space + /// to each [`TxBin`] instance in a [`BlockSpaceAllocator`]. + #[inline] + fn uninitialized_space_in_bytes(&self) -> u64 { + let total_bin_space = self.protocol_txs.allotted_space_in_bytes + + self.encrypted_txs.allotted_space_in_bytes + + self.decrypted_txs.allotted_space_in_bytes; + self.block.allotted_space_in_bytes - total_bin_space + } + + /// Claim all the space used by the [`TxBin`] instances + /// as block space. + #[inline] + fn claim_block_space(&mut self) { + let used_space = self.protocol_txs.occupied_space_in_bytes + + self.encrypted_txs.occupied_space_in_bytes + + self.decrypted_txs.occupied_space_in_bytes; + + self.block.occupied_space_in_bytes = used_space; + + self.decrypted_txs = TxBin::default(); + self.protocol_txs = TxBin::default(); + self.encrypted_txs = TxBin::default(); + } +} + +/// Allotted space for a batch of transactions of the same kind in some +/// proposed block, measured in bytes. +#[derive(Debug, Copy, Clone, Default)] +pub struct TxBin { + /// The current space utilized by the batch of transactions. + occupied_space_in_bytes: u64, + /// The maximum space the batch of transactions may occupy. + allotted_space_in_bytes: u64, +} + +impl TxBin { + /// Return a new [`TxBin`] with a total allotted space equal to the + /// floor of the fraction `frac` of the available block space `max_bytes`. + #[inline] + pub fn init_over_ratio(max_bytes: u64, frac: threshold::Threshold) -> Self { + let allotted_space_in_bytes = frac.over(max_bytes); + Self { + allotted_space_in_bytes, + occupied_space_in_bytes: 0, + } + } + + /// Return the amount of space left in this [`TxBin`]. + #[inline] + pub fn space_left_in_bytes(&self) -> u64 { + self.allotted_space_in_bytes - self.occupied_space_in_bytes + } + + /// Construct a new [`TxBin`], with a capacity of `max_bytes`. + #[inline] + pub fn init(max_bytes: u64) -> Self { + Self { + allotted_space_in_bytes: max_bytes, + occupied_space_in_bytes: 0, + } + } + + /// Shrink the allotted space of this [`TxBin`] to whatever + /// space is currently being utilized. + #[inline] + pub fn shrink_to_fit(&mut self) { + self.allotted_space_in_bytes = self.occupied_space_in_bytes; + } + + /// Try to dump a new transaction into this [`TxBin`]. + /// + /// Signal the caller if the tx is larger than its max + /// allotted bin space. + pub fn try_dump(&mut self, tx: &[u8]) -> Result<(), AllocFailure> { + let tx_len = tx.len() as u64; + if tx_len > self.allotted_space_in_bytes { + let bin_size = self.allotted_space_in_bytes; + return Err(AllocFailure::OverflowsBin { bin_size }); + } + let occupied = self.occupied_space_in_bytes + tx_len; + if occupied <= self.allotted_space_in_bytes { + self.occupied_space_in_bytes = occupied; + Ok(()) + } else { + let bin_space_left = self.space_left_in_bytes(); + Err(AllocFailure::Rejected { bin_space_left }) + } + } +} + +pub mod threshold { + //! Transaction allotment thresholds. + + use num_rational::Ratio; + + /// Threshold over a portion of block space. + #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] + pub struct Threshold(Ratio); + + impl Threshold { + /// Return a new [`Threshold`]. + const fn new(numer: u64, denom: u64) -> Self { + // constrain ratio to a max of 1 + let numer = if numer > denom { denom } else { numer }; + Self(Ratio::new_raw(numer, denom)) + } + + /// Return a [`Threshold`] over some free space. 
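// Worked example (see `test_txs_are_evenly_split_across_block` below, which
// uses a 60-byte block): `ONE_THIRD.over(60)` is `(1/3 * 60).to_integer()`,
// i.e. 20. `Ratio::to_integer` truncates, so `ONE_THIRD.over(100)` yields
// 33: rounding can slightly under-fill a bin, never over-fill it.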
+ pub fn over(self, free_space_in_bytes: u64) -> u64 { + (self.0 * free_space_in_bytes).to_integer() + } + } + + /// Divide free space in three. + pub const ONE_THIRD: Threshold = Threshold::new(1, 3); + + /// Divide free space in two. + pub const ONE_HALF: Threshold = Threshold::new(1, 2); +} + +#[cfg(test)] +mod tests { + use std::cell::RefCell; + + use assert_matches::assert_matches; + use proptest::prelude::*; + + use super::states::{ + NextState, NextStateWithEncryptedTxs, NextStateWithoutEncryptedTxs, + TryAlloc, + }; + use super::*; + use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes; + + /// Proptest generated txs. + #[derive(Debug)] + struct PropTx { + tendermint_max_block_space_in_bytes: u64, + protocol_txs: Vec, + encrypted_txs: Vec, + decrypted_txs: Vec, + } + + /// Check that at most 1/3 of the block space is + /// reserved for each kind of tx type, in the + /// allocator's common path. + #[test] + fn test_txs_are_evenly_split_across_block() { + const BLOCK_SIZE: u64 = 60; + + // reserve block space for decrypted txs + let mut alloc = BlockSpaceAllocator::init(BLOCK_SIZE); + + // assume we got ~1/3 encrypted txs at the prev block + assert!(alloc.try_alloc(&[0; 18]).is_ok()); + + // reserve block space for protocol txs + let mut alloc = alloc.next_state(); + + // the space we allotted to decrypted txs was shrunk to + // the total space we actually used up + assert_eq!(alloc.decrypted_txs.allotted_space_in_bytes, 18); + + // check that the allotted space for protocol txs is correct + assert_eq!(21, (BLOCK_SIZE - 18) / 2); + assert_eq!(alloc.protocol_txs.allotted_space_in_bytes, 21); + + // fill up the block space with protocol txs + assert!(alloc.try_alloc(&[0; 17]).is_ok()); + assert_matches!( + alloc.try_alloc(&[0; (21 - 17) + 1]), + Err(AllocFailure::Rejected { .. }) + ); + + // reserve block space for encrypted txs + let mut alloc = alloc.next_state_with_encrypted_txs(); + + // check that space was shrunk + assert_eq!(alloc.protocol_txs.allotted_space_in_bytes, 17); + + // check that we reserve at most 1/3 of the block space to + // encrypted txs + assert_eq!(25, BLOCK_SIZE - 17 - 18); + assert_eq!(20, BLOCK_SIZE / 3); + assert_eq!(alloc.encrypted_txs.allotted_space_in_bytes, 20); + + // fill up the block space with encrypted txs + assert!(alloc.try_alloc(&[0; 20]).is_ok()); + assert_matches!( + alloc.try_alloc(&[0; 1]), + Err(AllocFailure::Rejected { .. }) + ); + + // check that there is still remaining space left at the end + let mut alloc = alloc.next_state(); + let remaining_space = alloc.block.allotted_space_in_bytes + - alloc.block.occupied_space_in_bytes; + assert_eq!(remaining_space, 5); + + // fill up the remaining space + assert!(alloc.try_alloc(&[0; 5]).is_ok()); + assert_matches!( + alloc.try_alloc(&[0; 1]), + Err(AllocFailure::Rejected { .. }) + ); + } + + // Test that we cannot include encrypted txs in a block + // when the state invariants banish them from inclusion. + #[test] + fn test_encrypted_txs_are_rejected() { + let alloc = BlockSpaceAllocator::init(1234); + let alloc = alloc.next_state(); + let mut alloc = alloc.next_state_without_encrypted_txs(); + assert_matches!( + alloc.try_alloc(&[0; 1]), + Err(AllocFailure::Rejected { .. }) + ); + } + + proptest! { + /// Check if we reject a tx when its respective bin + /// capacity has been reached on a [`BlockSpaceAllocator`]. 
+ #[test] + fn test_reject_tx_on_bin_cap_reached(max in prop::num::u64::ANY) { + proptest_reject_tx_on_bin_cap_reached(max) + } + + /// Check if the sum of all individual bin allotments for a + /// [`BlockSpaceAllocator`] corresponds to the total space ceded + /// by Tendermint. + #[test] + fn test_bin_capacity_eq_provided_space(max in prop::num::u64::ANY) { + proptest_bin_capacity_eq_provided_space(max) + } + + /// Test that dumping txs whose total combined size + /// is less than the bin cap does not fill up the bin. + #[test] + fn test_tx_dump_doesnt_fill_up_bin(args in arb_transactions()) { + proptest_tx_dump_doesnt_fill_up_bin(args) + } + } + + /// Implementation of [`test_reject_tx_on_bin_cap_reached`]. + fn proptest_reject_tx_on_bin_cap_reached( + tendermint_max_block_space_in_bytes: u64, + ) { + let mut bins = + BlockSpaceAllocator::init(tendermint_max_block_space_in_bytes); + + // fill the entire bin of decrypted txs + bins.decrypted_txs.occupied_space_in_bytes = + bins.decrypted_txs.allotted_space_in_bytes; + + // make sure we can't dump any new decrypted txs in the bin + assert_matches!( + bins.try_alloc(b"arbitrary tx bytes"), + Err(AllocFailure::Rejected { .. }) + ); + } + + /// Implementation of [`test_bin_capacity_eq_provided_space`]. + fn proptest_bin_capacity_eq_provided_space( + tendermint_max_block_space_in_bytes: u64, + ) { + let bins = + BlockSpaceAllocator::init(tendermint_max_block_space_in_bytes); + assert_eq!(0, bins.uninitialized_space_in_bytes()); + } + + /// Implementation of [`test_tx_dump_doesnt_fill_up_bin`]. + fn proptest_tx_dump_doesnt_fill_up_bin(args: PropTx) { + let PropTx { + tendermint_max_block_space_in_bytes, + protocol_txs, + encrypted_txs, + decrypted_txs, + } = args; + + // produce new txs until the moment we would have + // filled up the bins. + // + // iterate over the produced txs to make sure we can keep + // dumping new txs without filling up the bins + + let bins = RefCell::new(BlockSpaceAllocator::init( + tendermint_max_block_space_in_bytes, + )); + let decrypted_txs = decrypted_txs.into_iter().take_while(|tx| { + let bin = bins.borrow().decrypted_txs; + let new_size = bin.occupied_space_in_bytes + tx.len() as u64; + new_size < bin.allotted_space_in_bytes + }); + for tx in decrypted_txs { + assert!(bins.borrow_mut().try_alloc(&tx).is_ok()); + } + + let bins = RefCell::new(bins.into_inner().next_state()); + let protocol_txs = protocol_txs.into_iter().take_while(|tx| { + let bin = bins.borrow().protocol_txs; + let new_size = bin.occupied_space_in_bytes + tx.len() as u64; + new_size < bin.allotted_space_in_bytes + }); + for tx in protocol_txs { + assert!(bins.borrow_mut().try_alloc(&tx).is_ok()); + } + + let bins = + RefCell::new(bins.into_inner().next_state_with_encrypted_txs()); + let encrypted_txs = encrypted_txs.into_iter().take_while(|tx| { + let bin = bins.borrow().encrypted_txs; + let new_size = bin.occupied_space_in_bytes + tx.len() as u64; + new_size < bin.allotted_space_in_bytes + }); + for tx in encrypted_txs { + assert!(bins.borrow_mut().try_alloc(&tx).is_ok()); + } + } + + prop_compose! { + /// Generate arbitrarily sized txs of different kinds. 
+    fn arb_transactions()
+        // create base strategies
+        (
+            (tendermint_max_block_space_in_bytes, protocol_tx_max_bin_size,
+             encrypted_tx_max_bin_size, decrypted_tx_max_bin_size)
+                in arb_max_bin_sizes(),
+        )
+        // compose strategies
+        (
+            tendermint_max_block_space_in_bytes in Just(tendermint_max_block_space_in_bytes),
+            protocol_txs in arb_tx_list(protocol_tx_max_bin_size),
+            encrypted_txs in arb_tx_list(encrypted_tx_max_bin_size),
+            decrypted_txs in arb_tx_list(decrypted_tx_max_bin_size),
+        )
+        -> PropTx {
+            PropTx {
+                tendermint_max_block_space_in_bytes,
+                protocol_txs,
+                encrypted_txs,
+                decrypted_txs,
+            }
+        }
+    }
+
+    /// Return random bin sizes for a [`BlockSpaceAllocator`].
+    fn arb_max_bin_sizes()
+    -> impl Strategy<Value = (u64, usize, usize, usize)> {
+        const MAX_BLOCK_SIZE_BYTES: u64 = 1000;
+        (1..=MAX_BLOCK_SIZE_BYTES).prop_map(
+            |tendermint_max_block_space_in_bytes| {
+                (
+                    tendermint_max_block_space_in_bytes,
+                    threshold::ONE_THIRD
+                        .over(tendermint_max_block_space_in_bytes)
+                        as usize,
+                    threshold::ONE_THIRD
+                        .over(tendermint_max_block_space_in_bytes)
+                        as usize,
+                    threshold::ONE_THIRD
+                        .over(tendermint_max_block_space_in_bytes)
+                        as usize,
+                )
+            },
+        )
+    }
+
+    /// Return a list of txs.
+    fn arb_tx_list(max_bin_size: usize) -> impl Strategy<Value = Vec<Vec<u8>>> {
+        const MAX_TX_NUM: usize = 64;
+        let tx = prop::collection::vec(prop::num::u8::ANY, 0..=max_bin_size);
+        prop::collection::vec(tx, 0..=MAX_TX_NUM)
+    }
+}
diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc/states.rs b/apps/src/lib/node/ledger/shell/block_space_alloc/states.rs
new file mode 100644
index 0000000000..dca808c09a
--- /dev/null
+++ b/apps/src/lib/node/ledger/shell/block_space_alloc/states.rs
@@ -0,0 +1,174 @@
+//! All the states of the [`BlockSpaceAllocator`] state machine,
+//! over the extent of a Tendermint consensus round
+//! block proposal.
+//!
+//! # States
+//!
+//! The state machine moves through the following state DAG:
+//!
+//! 1. [`BuildingDecryptedTxBatch`] - the initial state. In
+//!    this state, we populate a block with DKG decrypted txs.
+//! 2. [`BuildingProtocolTxBatch`] - the second state. In
+//!    this state, we populate a block with protocol txs.
+//! 3. [`BuildingEncryptedTxBatch`] - the third state. In
+//!    this state, we populate a block with DKG encrypted txs.
+//!    This state supports two modes of operation, which you can
+//!    think of as two states diverging from [`BuildingProtocolTxBatch`]:
+//!    * [`WithoutEncryptedTxs`] - When this mode is active, no encrypted txs
+//!      are included in a block proposal.
+//!    * [`WithEncryptedTxs`] - When this mode is active, we are able to
+//!      include encrypted txs in a block proposal.
+//! 4. [`FillingRemainingSpace`] - the fourth and final state.
+//!    During this phase, we fill all remaining block space with arbitrary
+//!    protocol transactions that haven't been included in a block, yet.
+
+mod decrypted_txs;
+mod encrypted_txs;
+mod protocol_txs;
+mod remaining_txs;
+
+use super::{AllocFailure, BlockSpaceAllocator};
+
+/// Convenience wrapper for a [`BlockSpaceAllocator`] state that allocates
+/// encrypted transactions.
+pub enum EncryptedTxBatchAllocator {
+    WithEncryptedTxs(
+        BlockSpaceAllocator<BuildingEncryptedTxBatch<WithEncryptedTxs>>,
+    ),
+    WithoutEncryptedTxs(
+        BlockSpaceAllocator<BuildingEncryptedTxBatch<WithoutEncryptedTxs>>,
+    ),
+}
+
+/// The leader of the current Tendermint round is building
+/// a new batch of DKG decrypted transactions.
+///
+/// For more info, read the module docs of
+/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`].
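+// Illustrative sketch (not part of the original patch): the path a block
+// proposer drives through this DAG, assuming `max_space` is the byte limit
+// ceded by Tendermint:
+//
+//     let alloc = BlockSpaceAllocator::init(max_space);
+//     // ...try_alloc() decrypted txs...
+//     let alloc = alloc.next_state(); // -> BuildingProtocolTxBatch
+//     // ...try_alloc() protocol txs...
+//     let alloc = alloc.next_state_with_encrypted_txs();
+//     // ...try_alloc() encrypted txs...
+//     let alloc = alloc.next_state(); // -> FillingRemainingSpace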
+pub enum BuildingDecryptedTxBatch {}
+
+/// The leader of the current Tendermint round is building
+/// a new batch of Namada protocol transactions.
+///
+/// For more info, read the module docs of
+/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`].
+pub enum BuildingProtocolTxBatch {}
+
+/// The leader of the current Tendermint round is building
+/// a new batch of DKG encrypted transactions.
+///
+/// For more info, read the module docs of
+/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`].
+pub struct BuildingEncryptedTxBatch<Mode> {
+    /// One of [`WithEncryptedTxs`] and [`WithoutEncryptedTxs`].
+    _mode: Mode,
+}
+
+/// The leader of the current Tendermint round is populating
+/// all remaining space in a block proposal with arbitrary
+/// protocol transactions that haven't been included in the
+/// block, yet.
+///
+/// For more info, read the module docs of
+/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`].
+pub enum FillingRemainingSpace {}
+
+/// Allow block proposals to include encrypted txs.
+///
+/// For more info, read the module docs of
+/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`].
+pub enum WithEncryptedTxs {}
+
+/// Prohibit block proposals from including encrypted txs.
+///
+/// For more info, read the module docs of
+/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`].
+pub enum WithoutEncryptedTxs {}
+
+/// Try to allocate a new transaction on a [`BlockSpaceAllocator`] state.
+///
+/// For more info, read the module docs of
+/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`].
+pub trait TryAlloc {
+    /// Try to allocate space for a new transaction.
+    fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure>;
+}
+
+/// Represents a state transition in the [`BlockSpaceAllocator`] state machine.
+///
+/// This trait should not be used directly. Instead, consider using one of
+/// [`NextState`], [`NextStateWithEncryptedTxs`] or
+/// [`NextStateWithoutEncryptedTxs`].
+///
+/// For more info, read the module docs of
+/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`].
+pub trait NextStateImpl<Transition = ()> {
+    /// The next state in the [`BlockSpaceAllocator`] state machine.
+    type Next;
+
+    /// Transition to the next state in the [`BlockSpaceAllocator`] state
+    /// machine.
+    fn next_state_impl(self) -> Self::Next;
+}
+
+/// Convenience extension of [`NextStateImpl`], to transition to a new
+/// state with encrypted txs in a block.
+///
+/// For more info, read the module docs of
+/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`].
+pub trait NextStateWithEncryptedTxs: NextStateImpl<WithEncryptedTxs> {
+    /// Transition to the next state in the [`BlockSpaceAllocator`] state,
+    /// ensuring we include encrypted txs in a block.
+    #[inline]
+    fn next_state_with_encrypted_txs(self) -> Self::Next
+    where
+        Self: Sized,
+    {
+        self.next_state_impl()
+    }
+}
+
+impl<S> NextStateWithEncryptedTxs for S where S: NextStateImpl<WithEncryptedTxs>
+{}
+
+/// Convenience extension of [`NextStateImpl`], to transition to a new
+/// state without encrypted txs in a block.
+///
+/// For more info, read the module docs of
+/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`].
+pub trait NextStateWithoutEncryptedTxs:
+    NextStateImpl<WithoutEncryptedTxs>
+{
+    /// Transition to the next state in the [`BlockSpaceAllocator`] state,
+    /// ensuring we do not include encrypted txs in a block.
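+    // Illustrative note (not part of the original patch): thanks to the
+    // blanket impl below, a state opts into this extension merely by
+    // implementing `NextStateImpl<WithoutEncryptedTxs>`; a hypothetical
+    // sketch, with `SomeState`/`SomeNextState` standing in for real states:
+    //
+    //     impl NextStateImpl<WithoutEncryptedTxs> for SomeState {
+    //         type Next = SomeNextState;
+    //         fn next_state_impl(self) -> Self::Next { /* ... */ }
+    //     }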
+    #[inline]
+    fn next_state_without_encrypted_txs(self) -> Self::Next
+    where
+        Self: Sized,
+    {
+        self.next_state_impl()
+    }
+}
+
+impl<S> NextStateWithoutEncryptedTxs for S where
+    S: NextStateImpl<WithoutEncryptedTxs>
+{
+}
+
+/// Convenience extension of [`NextStateImpl`], to transition to a new
+/// state with a null transition function.
+///
+/// For more info, read the module docs of
+/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`].
+pub trait NextState: NextStateImpl {
+    /// Transition to the next state in the [`BlockSpaceAllocator`] state,
+    /// using a null transition function.
+    #[inline]
+    fn next_state(self) -> Self::Next
+    where
+        Self: Sized,
+    {
+        self.next_state_impl()
+    }
+}
+
+impl<S> NextState for S where S: NextStateImpl {}
diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc/states/decrypted_txs.rs b/apps/src/lib/node/ledger/shell/block_space_alloc/states/decrypted_txs.rs
new file mode 100644
index 0000000000..ec49284e19
--- /dev/null
+++ b/apps/src/lib/node/ledger/shell/block_space_alloc/states/decrypted_txs.rs
@@ -0,0 +1,46 @@
+use std::marker::PhantomData;
+
+use super::super::{threshold, AllocFailure, BlockSpaceAllocator, TxBin};
+use super::{
+    BuildingDecryptedTxBatch, BuildingProtocolTxBatch, NextStateImpl, TryAlloc,
+};
+
+impl TryAlloc for BlockSpaceAllocator<BuildingDecryptedTxBatch> {
+    #[inline]
+    fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure> {
+        self.decrypted_txs.try_dump(tx)
+    }
+}
+
+impl NextStateImpl for BlockSpaceAllocator<BuildingDecryptedTxBatch> {
+    type Next = BlockSpaceAllocator<BuildingProtocolTxBatch>;
+
+    #[inline]
+    fn next_state_impl(mut self) -> Self::Next {
+        self.decrypted_txs.shrink_to_fit();
+
+        // reserve half of the remaining block space for protocol txs.
+        // using this strategy, we will eventually converge to 1/3 of
+        // the allotted block space for protocol txs
+        let remaining_free_space = self.uninitialized_space_in_bytes();
+        self.protocol_txs =
+            TxBin::init_over_ratio(remaining_free_space, threshold::ONE_HALF);
+
+        // cast state
+        let Self {
+            block,
+            protocol_txs,
+            encrypted_txs,
+            decrypted_txs,
+            ..
+        } = self;
+
+        BlockSpaceAllocator {
+            _state: PhantomData,
+            block,
+            protocol_txs,
+            encrypted_txs,
+            decrypted_txs,
+        }
+    }
+}
diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc/states/encrypted_txs.rs b/apps/src/lib/node/ledger/shell/block_space_alloc/states/encrypted_txs.rs
new file mode 100644
index 0000000000..019de0a6b3
--- /dev/null
+++ b/apps/src/lib/node/ledger/shell/block_space_alloc/states/encrypted_txs.rs
@@ -0,0 +1,106 @@
+use std::marker::PhantomData;
+
+use super::super::{AllocFailure, BlockSpaceAllocator};
+use super::{
+    BuildingEncryptedTxBatch, EncryptedTxBatchAllocator, FillingRemainingSpace,
+    NextStateImpl, TryAlloc, WithEncryptedTxs, WithoutEncryptedTxs,
+};
+
+impl TryAlloc
+    for BlockSpaceAllocator<BuildingEncryptedTxBatch<WithEncryptedTxs>>
+{
+    #[inline]
+    fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure> {
+        self.encrypted_txs.try_dump(tx)
+    }
+}
+
+impl NextStateImpl
+    for BlockSpaceAllocator<BuildingEncryptedTxBatch<WithEncryptedTxs>>
+{
+    type Next = BlockSpaceAllocator<FillingRemainingSpace>;
+
+    #[inline]
+    fn next_state_impl(self) -> Self::Next {
+        next_state(self)
+    }
+}
+
+impl TryAlloc
+    for BlockSpaceAllocator<BuildingEncryptedTxBatch<WithoutEncryptedTxs>>
+{
+    #[inline]
+    fn try_alloc(&mut self, _tx: &[u8]) -> Result<(), AllocFailure> {
+        Err(AllocFailure::Rejected { bin_space_left: 0 })
+    }
+}
+
+impl NextStateImpl
+    for BlockSpaceAllocator<BuildingEncryptedTxBatch<WithoutEncryptedTxs>>
+{
+    type Next = BlockSpaceAllocator<FillingRemainingSpace>;
+
+    #[inline]
+    fn next_state_impl(self) -> Self::Next {
+        next_state(self)
+    }
+}
+
+#[inline]
+fn next_state<Mode>(
+    mut alloc: BlockSpaceAllocator<BuildingEncryptedTxBatch<Mode>>,
+) -> BlockSpaceAllocator<FillingRemainingSpace> {
+    alloc.encrypted_txs.shrink_to_fit();
+
+    // reserve space for any remaining txs
+    alloc.claim_block_space();
+
+    // cast state
+    let BlockSpaceAllocator {
+        block,
+        protocol_txs,
+        encrypted_txs,
+        decrypted_txs,
+        ..
+    } = alloc;
+
+    BlockSpaceAllocator {
+        _state: PhantomData,
+        block,
+        protocol_txs,
+        encrypted_txs,
+        decrypted_txs,
+    }
+}
+
+impl TryAlloc for EncryptedTxBatchAllocator {
+    #[inline]
+    fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure> {
+        match self {
+            EncryptedTxBatchAllocator::WithEncryptedTxs(state) => {
+                state.try_alloc(tx)
+            }
+            EncryptedTxBatchAllocator::WithoutEncryptedTxs(state) => {
+                // NOTE: this operation will cause the allocator to
+                // run out of memory immediately
+                state.try_alloc(tx)
+            }
+        }
+    }
+}
+
+impl NextStateImpl for EncryptedTxBatchAllocator {
+    type Next = BlockSpaceAllocator<FillingRemainingSpace>;
+
+    #[inline]
+    fn next_state_impl(self) -> Self::Next {
+        match self {
+            EncryptedTxBatchAllocator::WithEncryptedTxs(state) => {
+                state.next_state_impl()
+            }
+            EncryptedTxBatchAllocator::WithoutEncryptedTxs(state) => {
+                state.next_state_impl()
+            }
+        }
+    }
+}
diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc/states/protocol_txs.rs b/apps/src/lib/node/ledger/shell/block_space_alloc/states/protocol_txs.rs
new file mode 100644
index 0000000000..48194047a8
--- /dev/null
+++ b/apps/src/lib/node/ledger/shell/block_space_alloc/states/protocol_txs.rs
@@ -0,0 +1,82 @@
+use std::marker::PhantomData;
+
+use super::super::{threshold, AllocFailure, BlockSpaceAllocator, TxBin};
+use super::{
+    BuildingEncryptedTxBatch, BuildingProtocolTxBatch, NextStateImpl, TryAlloc,
+    WithEncryptedTxs, WithoutEncryptedTxs,
+};
+
+impl TryAlloc for BlockSpaceAllocator<BuildingProtocolTxBatch> {
+    #[inline]
+    fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure> {
+        self.protocol_txs.try_dump(tx)
+    }
+}
+
+impl NextStateImpl<WithEncryptedTxs>
+    for BlockSpaceAllocator<BuildingProtocolTxBatch>
+{
+    type Next =
+        BlockSpaceAllocator<BuildingEncryptedTxBatch<WithEncryptedTxs>>;
+
+    #[inline]
+    fn next_state_impl(mut self) -> Self::Next {
+        self.protocol_txs.shrink_to_fit();
+
+        // reserve space for encrypted txs; encrypted txs can use up to
+        // 1/3 of the max block space; the rest goes to protocol txs, once
+        // more
+        let one_third_of_block_space =
+            threshold::ONE_THIRD.over(self.block.allotted_space_in_bytes);
+        let remaining_free_space = self.uninitialized_space_in_bytes();
+        self.encrypted_txs = TxBin::init(std::cmp::min(
+            one_third_of_block_space,
+            remaining_free_space,
+        ));
+
+        // cast state
+        let Self {
+            block,
+            protocol_txs,
+            encrypted_txs,
+            decrypted_txs,
+            ..
+        } = self;
+
+        BlockSpaceAllocator {
+            _state: PhantomData,
+            block,
+            protocol_txs,
+            encrypted_txs,
+            decrypted_txs,
+        }
+    }
+}
+
+impl NextStateImpl<WithoutEncryptedTxs>
+    for BlockSpaceAllocator<BuildingProtocolTxBatch>
+{
+    type Next =
+        BlockSpaceAllocator<BuildingEncryptedTxBatch<WithoutEncryptedTxs>>;
+
+    #[inline]
+    fn next_state_impl(mut self) -> Self::Next {
+        self.protocol_txs.shrink_to_fit();
+
+        // cast state
+        let Self {
+            block,
+            protocol_txs,
+            encrypted_txs,
+            decrypted_txs,
+            ..
+        } = self;
+
+        BlockSpaceAllocator {
+            _state: PhantomData,
+            block,
+            protocol_txs,
+            encrypted_txs,
+            decrypted_txs,
+        }
+    }
+}
diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc/states/remaining_txs.rs b/apps/src/lib/node/ledger/shell/block_space_alloc/states/remaining_txs.rs
new file mode 100644
index 0000000000..48f3a43df5
--- /dev/null
+++ b/apps/src/lib/node/ledger/shell/block_space_alloc/states/remaining_txs.rs
@@ -0,0 +1,11 @@
+use super::super::{AllocFailure, BlockSpaceAllocator};
+use super::{FillingRemainingSpace, TryAlloc};
+
+impl TryAlloc for BlockSpaceAllocator<FillingRemainingSpace> {
+    #[inline]
+    fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure> {
+        // NOTE: tx dispatching is done at a higher level, to prevent
+        // allocating space for encrypted txs here
+        self.block.try_dump(tx)
+    }
+}
diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs
index d762ee91d8..54515675d4 100644
--- a/apps/src/lib/node/ledger/shell/finalize_block.rs
+++ b/apps/src/lib/node/ledger/shell/finalize_block.rs
@@ -3,6 +3,8 @@
 use namada::ledger::pos::types::into_tm_voting_power;
 use namada::ledger::protocol;
 use namada::types::storage::{BlockHash, BlockResults, Header};
+use namada::types::transaction::protocol::ProtocolTxType;
+use namada::types::vote_extensions::ethereum_events::MultiSignedEthEvent;
 
 use super::governance::execute_governance_proposals;
 use super::*;
@@ -150,16 +152,57 @@ where
                         );
                         continue;
                     }
-                    TxType::Protocol(_) => {
-                        tracing::error!(
-                            "Internal logic error: FinalizeBlock received a \
-                             TxType::Protocol transaction"
-                        );
-                        continue;
-                    }
+                    TxType::Protocol(protocol_tx) => match protocol_tx.tx {
+                        ProtocolTxType::EthEventsVext(ref ext) => {
+                            if self
+                                .mode
+                                .get_validator_address()
+                                .map(|validator| {
+                                    validator == &ext.data.validator_addr
+                                })
+                                .unwrap_or(false)
+                            {
+                                for event in ext.data.ethereum_events.iter() {
+                                    self.mode.dequeue_eth_event(event);
+                                }
+                            }
+                            Event::new_tx_event(&tx_type, height.0)
+                        }
+                        ProtocolTxType::ValSetUpdateVext(_) => {
+                            Event::new_tx_event(&tx_type, height.0)
+                        }
+                        ProtocolTxType::EthereumEvents(ref digest) => {
+                            if let Some(address) =
+                                self.mode.get_validator_address().cloned()
+                            {
+                                let this_signer =
+                                    &(address, self.storage.last_height);
+                                for MultiSignedEthEvent { event, signers } in
+                                    &digest.events
+                                {
+                                    if signers.contains(this_signer) {
+                                        self.mode.dequeue_eth_event(event);
+                                    }
+                                }
+                            }
+                            Event::new_tx_event(&tx_type, height.0)
+                        }
+                        ProtocolTxType::ValidatorSetUpdate(_) => {
+                            Event::new_tx_event(&tx_type, height.0)
+                        }
+                        ref protocol_tx_type => {
+                            tracing::error!(
+                                ?protocol_tx_type,
+                                "Internal
logic error: FinalizeBlock received an \ + unsupported TxType::Protocol transaction: {:?}", + protocol_tx + ); + continue; + } + }, }; - match protocol::apply_tx( + match protocol::dispatch_tx( tx_type, tx_length, TxIndex( @@ -169,7 +212,7 @@ where ), &mut self.gas_meter, &mut self.write_log, - &self.storage, + &mut self.storage, &mut self.vp_wasm_cache, &mut self.tx_wasm_cache, ) @@ -330,8 +373,11 @@ where /// are covered by the e2e tests. #[cfg(test)] mod test_finalize_block { + use namada::types::ethereum_events::EthAddress; use namada::types::storage::Epoch; use namada::types::transaction::{EncryptionKey, Fee}; + use namada::types::vote_extensions::ethereum_events; + use namada::types::vote_extensions::ethereum_events::MultiSignedEthEvent; use super::*; use crate::node::ledger::shell::test_utils::*; @@ -344,7 +390,7 @@ mod test_finalize_block { /// not appear in the queue of txs to be decrypted #[test] fn test_process_proposal_rejected_wrapper_tx() { - let (mut shell, _) = setup(); + let (mut shell, _, _) = setup(); let keypair = gen_keypair(); let mut processed_txs = vec![]; let mut valid_wrappers = vec![]; @@ -419,7 +465,7 @@ mod test_finalize_block { /// proposal #[test] fn test_process_proposal_rejected_decrypted_tx() { - let (mut shell, _) = setup(); + let (mut shell, _, _) = setup(); let keypair = gen_keypair(); let raw_tx = Tx::new( "wasm_code".as_bytes().to_owned(), @@ -467,7 +513,7 @@ mod test_finalize_block { /// but the tx result contains the appropriate error code. #[test] fn test_undecryptable_returns_error_code() { - let (mut shell, _) = setup(); + let (mut shell, _, _) = setup(); let keypair = crate::wallet::defaults::daewon_keypair(); let pubkey = EncryptionKey::default(); @@ -524,7 +570,7 @@ mod test_finalize_block { /// decrypted txs are de-queued. #[test] fn test_mixed_txs_queued_in_correct_order() { - let (mut shell, _) = setup(); + let (mut shell, _, _) = setup(); let keypair = gen_keypair(); let mut processed_txs = vec![]; let mut valid_txs = vec![]; @@ -642,4 +688,174 @@ mod test_finalize_block { } assert_eq!(counter, 2); } + + /// Test if a rejected protocol tx is applied and emits + /// the correct event + #[test] + fn test_rejected_protocol_tx() { + const LAST_HEIGHT: BlockHeight = BlockHeight(3); + let (mut shell, _, _) = setup_at_height(LAST_HEIGHT); + let protocol_key = + shell.mode.get_protocol_key().expect("Test failed").clone(); + + let tx = ProtocolTxType::EthereumEvents(ethereum_events::VextDigest { + signatures: Default::default(), + events: vec![], + }) + .sign(&protocol_key) + .to_bytes(); + + let req = FinalizeBlock { + txs: vec![ProcessedTx { + tx, + result: TxResult { + code: ErrorCodes::InvalidTx.into(), + info: Default::default(), + }, + }], + ..Default::default() + }; + let mut resp = shell.finalize_block(req).expect("Test failed"); + assert_eq!(resp.len(), 1); + let event = resp.remove(0); + assert_eq!(event.event_type.to_string(), String::from("applied")); + let code = event.attributes.get("code").expect("Test failed"); + assert_eq!(code, &String::from(ErrorCodes::InvalidTx)); + } + + /// Test that once a validator's vote for an Ethereum event lands + /// on-chain from a vote extension digest, it dequeues from the + /// list of events to vote on. 
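+    // Illustrative note (not part of the original patch): the digest built
+    // in the test below keys everything on `(validator address, block
+    // height)` pairs, roughly:
+    //
+    //     VextDigest {
+    //         signatures: { (addr, height) => sig },
+    //         events: [MultiSignedEthEvent { event, signers: {(addr, height)} }],
+    //     }
+    //
+    // A node dequeues `event` locally when its own `(addr, height)` pair
+    // appears in `signers`.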
+ #[test] + fn test_eth_events_dequeued_digest() { + let (mut shell, _, oracle) = setup(); + let protocol_key = + shell.mode.get_protocol_key().expect("Test failed").clone(); + let address = shell + .mode + .get_validator_address() + .expect("Test failed") + .clone(); + + // ---- the ledger receives a new Ethereum event + let event = EthereumEvent::NewContract { + name: "Test".to_string(), + address: EthAddress([0; 20]), + }; + tokio_test::block_on(oracle.send(event.clone())).expect("Test failed"); + let [queued_event]: [EthereumEvent; 1] = + shell.new_ethereum_events().try_into().expect("Test failed"); + assert_eq!(queued_event, event); + + // ---- The protocol tx that includes this event on-chain + let ext = ethereum_events::Vext { + block_height: shell.storage.last_height, + ethereum_events: vec![event.clone()], + validator_addr: address.clone(), + } + .sign(&protocol_key); + + let processed_tx = { + let signed = MultiSignedEthEvent { + event, + signers: BTreeSet::from([( + address.clone(), + shell.storage.last_height, + )]), + }; + + let digest = ethereum_events::VextDigest { + signatures: vec![( + (address, shell.storage.last_height), + ext.sig, + )] + .into_iter() + .collect(), + events: vec![signed], + }; + ProcessedTx { + tx: ProtocolTxType::EthereumEvents(digest) + .sign(&protocol_key) + .to_bytes(), + result: TxResult { + code: ErrorCodes::Ok.into(), + info: "".into(), + }, + } + }; + + // ---- This protocol tx is accepted + let [result]: [Event; 1] = shell + .finalize_block(FinalizeBlock { + txs: vec![processed_tx], + ..Default::default() + }) + .expect("Test failed") + .try_into() + .expect("Test failed"); + assert_eq!(result.event_type.to_string(), String::from("applied")); + let code = result.attributes.get("code").expect("Test failed").as_str(); + assert_eq!(code, String::from(ErrorCodes::Ok).as_str()); + + // --- The event is removed from the queue + assert!(shell.new_ethereum_events().is_empty()); + } + + /// Test that once a validator's vote for an Ethereum event lands + /// on-chain from a protocol tx, it dequeues from the + /// list of events to vote on. 
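+    // Illustrative note (not part of the original patch): unlike the digest
+    // case above, `ProtocolTxType::EthEventsVext` carries a single signed
+    // `ethereum_events::Vext`; FinalizeBlock dequeues its events only when
+    // `ext.data.validator_addr` matches this node's own validator address.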
+ #[test] + fn test_eth_events_dequeued_protocol_tx() { + let (mut shell, _, oracle) = setup(); + let protocol_key = + shell.mode.get_protocol_key().expect("Test failed").clone(); + let address = shell + .mode + .get_validator_address() + .expect("Test failed") + .clone(); + + // ---- the ledger receives a new Ethereum event + let event = EthereumEvent::NewContract { + name: "Test".to_string(), + address: EthAddress([0; 20]), + }; + tokio_test::block_on(oracle.send(event.clone())).expect("Test failed"); + let [queued_event]: [EthereumEvent; 1] = + shell.new_ethereum_events().try_into().expect("Test failed"); + assert_eq!(queued_event, event); + + // ---- The protocol tx that includes this event on-chain + let ext = ethereum_events::Vext { + block_height: shell.storage.last_height, + ethereum_events: vec![event], + validator_addr: address, + } + .sign(&protocol_key); + let processed_tx = ProcessedTx { + tx: ProtocolTxType::EthEventsVext(ext) + .sign(&protocol_key) + .to_bytes(), + result: TxResult { + code: ErrorCodes::Ok.into(), + info: "".into(), + }, + }; + + // ---- This protocol tx is accepted + let [result]: [Event; 1] = shell + .finalize_block(FinalizeBlock { + txs: vec![processed_tx], + ..Default::default() + }) + .expect("Test failed") + .try_into() + .expect("Test failed"); + assert_eq!(result.event_type.to_string(), String::from("applied")); + let code = result.attributes.get("code").expect("Test failed").as_str(); + assert_eq!(code, String::from(ErrorCodes::Ok).as_str()); + + // --- The event is removed from the queue + assert!(shell.new_ethereum_events().is_empty()); + } } diff --git a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index 02a22b1caa..f4771f40fe 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -81,7 +81,7 @@ where .storage .write(&pending_execution_key, "") .expect("Should be able to write to storage."); - let tx_result = protocol::apply_tx( + let tx_result = protocol::dispatch_tx( tx_type, 0, /* this is used to compute the fee * based on the code size. We dont @@ -89,7 +89,7 @@ where TxIndex::default(), &mut BlockGasMeter::default(), &mut shell.write_log, - &shell.storage, + &mut shell.storage, &mut shell.vp_wasm_cache, &mut shell.tx_wasm_cache, ); diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs index d025b2753f..7c711f1c5f 100644 --- a/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/apps/src/lib/node/ledger/shell/init_chain.rs @@ -3,8 +3,13 @@ use std::collections::HashMap; use std::hash::Hash; use namada::ledger::parameters::Parameters; -use namada::ledger::pos::into_tm_voting_power; +use namada::ledger::pos::{into_tm_voting_power, PosParams}; +use namada::ledger::storage::traits::StorageHasher; +use namada::ledger::storage::{DBIter, DB}; +use namada::ledger::{ibc, pos}; use namada::types::key::*; +use namada::types::time::{DateTimeUtc, TimeZone, Utc}; +use namada::types::token; #[cfg(not(feature = "dev"))] use sha2::{Digest, Sha256}; @@ -12,6 +17,7 @@ use super::*; use crate::facade::tendermint_proto::abci; use crate::facade::tendermint_proto::crypto::PublicKey as TendermintPublicKey; use crate::facade::tendermint_proto::google::protobuf; +use crate::facade::tower_abci::{request, response}; use crate::wasm_loader; impl Shell @@ -22,11 +28,13 @@ where /// Create a new genesis for the chain with specified id. This includes /// 1. A set of initial users and tokens /// 2. 
Setting up the validity predicates for both users and tokens + /// 3. Validators + /// 4. The PoS system + /// 5. The Ethereum bridge parameters pub fn init_chain( &mut self, init: request::InitChain, ) -> Result { - let mut response = response::InitChain::default(); let (current_chain_id, _) = self.storage.get_chain_id(); if current_chain_id != init.chain_id { return Err(Error::ChainId(format!( @@ -65,6 +73,7 @@ where // Initialize protocol parameters let genesis::Parameters { epoch_duration, + max_proposal_bytes, max_expected_time_per_block, vp_whitelist, tx_whitelist, @@ -98,6 +107,7 @@ where } let parameters = Parameters { epoch_duration, + max_proposal_bytes, max_expected_time_per_block, vp_whitelist, tx_whitelist, @@ -112,6 +122,11 @@ where // Initialize governance parameters genesis.gov_params.init_storage(&mut self.storage); + // configure the Ethereum bridge if the configuration is set. + if let Some(config) = genesis.ethereum_bridge_params { + tracing::debug!("Initializing Ethereum bridge storage."); + config.init_storage(&mut self.storage); + } // Depends on parameters being initialized self.storage @@ -122,13 +137,44 @@ where let mut vp_code_cache: HashMap> = HashMap::default(); // Initialize genesis established accounts + self.initialize_established_accounts( + genesis.established_accounts, + &mut vp_code_cache, + )?; + + // Initialize genesis implicit + self.initialize_implicit_accounts(genesis.implicit_accounts); + + // Initialize genesis token accounts + self.initialize_token_accounts( + genesis.token_accounts, + &mut vp_code_cache, + ); + + // Initialize genesis validator accounts + self.initialize_validators(&genesis.validators, &mut vp_code_cache); + // set the initial validators set + Ok( + self.set_initial_validators( + genesis.validators, + &genesis.pos_params, + ), + ) + } + + /// Initialize genesis established accounts + fn initialize_established_accounts( + &mut self, + accounts: Vec, + vp_code_cache: &mut HashMap>, + ) -> Result<()> { for genesis::EstablishedAccount { address, vp_code_path, vp_sha256, public_key, storage, - } in genesis.established_accounts + } in accounts { let vp_code = match vp_code_cache.get(&vp_code_path).cloned() { Some(vp_code) => vp_code, @@ -172,24 +218,37 @@ where self.storage.write(&key, value).unwrap(); } } + Ok(()) + } + /// Initialize genesis implicit accounts + fn initialize_implicit_accounts( + &mut self, + accounts: Vec, + ) { // Initialize genesis implicit - for genesis::ImplicitAccount { public_key } in genesis.implicit_accounts - { - let address: address::Address = (&public_key).into(); + for genesis::ImplicitAccount { public_key } in accounts { + let address: Address = (&public_key).into(); let pk_storage_key = pk_key(&address); self.storage .write(&pk_storage_key, public_key.try_to_vec().unwrap()) .unwrap(); } + } + /// Initialize genesis token accounts + fn initialize_token_accounts( + &mut self, + accounts: Vec, + vp_code_cache: &mut HashMap>, + ) { // Initialize genesis token accounts for genesis::TokenAccount { address, vp_code_path, vp_sha256, balances, - } in genesis.token_accounts + } in accounts { let vp_code = vp_code_cache.get_or_insert_with(vp_code_path.clone(), || { @@ -226,9 +285,16 @@ where .unwrap(); } } + } + /// Initialize genesis validator accounts + fn initialize_validators( + &mut self, + validators: &[genesis::Validator], + vp_code_cache: &mut HashMap>, + ) { // Initialize genesis validator accounts - for validator in &genesis.validators { + for validator in validators { let vp_code = 
vp_code_cache.get_or_insert_with( validator.validator_vp_code_path.clone(), || { @@ -298,22 +364,27 @@ where ) .expect("Unable to set genesis user public DKG session key"); } + } + /// Initialize the PoS and set the initial validator set + fn set_initial_validators( + &mut self, + validators: Vec, + pos_params: &PosParams, + ) -> response::InitChain { + let mut response = response::InitChain::default(); // PoS system depends on epoch being initialized let (current_epoch, _gas) = self.storage.get_current_epoch(); pos::init_genesis_storage( &mut self.storage, - &genesis.pos_params, - genesis - .validators - .iter() - .map(|validator| &validator.pos_data), + pos_params, + validators.iter().map(|validator| &validator.pos_data), current_epoch, ); ibc::init_genesis_storage(&mut self.storage); // Set the initial validator set - for validator in genesis.validators { + for validator in validators { let mut abci_validator = abci::ValidatorUpdate::default(); let consensus_key: common::PublicKey = validator.pos_data.consensus_key.clone(); @@ -322,12 +393,12 @@ where }; abci_validator.pub_key = Some(pub_key); abci_validator.power = into_tm_voting_power( - genesis.pos_params.tm_votes_per_token, + pos_params.tm_votes_per_token, validator.pos_data.tokens, ); response.validators.push(abci_validator); } - Ok(response) + response } } diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 4c461568a9..18d2be8cc6 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -5,14 +5,16 @@ //! and [`Shell::process_proposal`] must be also reverted //! (unless we can simply overwrite them in the next block). //! More info in . +mod block_space_alloc; mod finalize_block; mod governance; mod init_chain; mod prepare_proposal; mod process_proposal; -mod queries; +pub(super) mod queries; +mod vote_extensions; -use std::collections::HashSet; +use std::collections::{BTreeSet, HashSet}; use std::convert::{TryFrom, TryInto}; use std::mem; use std::path::{Path, PathBuf}; @@ -27,32 +29,29 @@ use namada::ledger::pos::namada_proof_of_stake::types::{ ActiveValidator, ValidatorSetUpdate, }; use namada::ledger::pos::namada_proof_of_stake::PosBase; +use namada::ledger::protocol::ShellParams; +use namada::ledger::storage::traits::{Sha256Hasher, StorageHasher}; use namada::ledger::storage::write_log::WriteLog; -use namada::ledger::storage::{ - DBIter, Sha256Hasher, Storage, StorageHasher, DB, -}; -use namada::ledger::{ibc, pos, protocol}; +use namada::ledger::storage::{DBIter, Storage, DB}; +use namada::ledger::{pos, protocol}; use namada::proto::{self, Tx}; use namada::types::address::{masp, masp_tx_key, Address}; use namada::types::chain::ChainId; +use namada::types::ethereum_events::EthereumEvent; use namada::types::key::*; use namada::types::storage::{BlockHeight, Key, TxIndex}; -use namada::types::time::{DateTimeUtc, TimeZone, Utc}; use namada::types::transaction::{ hash_tx, process_tx, verify_decrypted_correctly, AffineCurve, DecryptedTx, EllipticCurve, PairingEngine, TxType, WrapperTx, }; -use namada::types::{address, token}; use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::WasmCacheRwAccess; use num_derive::{FromPrimitive, ToPrimitive}; use num_traits::{FromPrimitive, ToPrimitive}; use thiserror::Error; -use tokio::sync::mpsc::UnboundedSender; +use tokio::sync::mpsc::{Receiver, UnboundedSender}; use crate::config::{genesis, TendermintMode}; -#[cfg(feature = "abcipp")] -use 
crate::facade::tendermint_proto::abci::response_verify_vote_extension::VerifyStatus; use crate::facade::tendermint_proto::abci::{ Misbehavior as Evidence, MisbehaviorType as EvidenceType, ValidatorUpdate, }; @@ -62,7 +61,7 @@ use crate::node::ledger::shims::abcipp_shim_types::shim; use crate::node::ledger::shims::abcipp_shim_types::shim::response::TxResult; use crate::node::ledger::{storage, tendermint_node}; #[allow(unused_imports)] -use crate::wallet::ValidatorData; +use crate::wallet::{ValidatorData, ValidatorKeys}; use crate::{config, wallet}; fn key_to_tendermint( @@ -92,6 +91,8 @@ pub enum Error { GasOverflow, #[error("{0}")] Tendermint(tendermint_node::Error), + #[error("{0}")] + Ethereum(super::ethereum_node::Error), #[error("Server error: {0}")] TowerServer(String), #[error("{0}")] @@ -114,7 +115,7 @@ impl From for TxResult { /// The different error codes that the ledger may /// send back to a client indicating the status /// of their submitted tx -#[derive(Debug, Clone, FromPrimitive, ToPrimitive, PartialEq)] +#[derive(Debug, Copy, Clone, FromPrimitive, ToPrimitive, PartialEq)] pub enum ErrorCodes { Ok = 0, InvalidTx = 1, @@ -123,6 +124,17 @@ pub enum ErrorCodes { InvalidOrder = 4, ExtraTxs = 5, Undecryptable = 6, + InvalidVoteExtension = 7, + AllocationError = 8, /* NOTE: keep these values in sync with + * [`ErrorCodes::is_recoverable`] */ +} + +impl ErrorCodes { + /// Checks if the given [`ErrorCodes`] value is a protocol level error, + /// that can be recovered from at the finalize block stage. + pub const fn is_recoverable(self) -> bool { + (self as u32) <= 3 + } } impl From for u32 { @@ -158,20 +170,125 @@ pub(super) enum ShellMode { Validator { data: ValidatorData, broadcast_sender: UnboundedSender>, + ethereum_recv: EthereumReceiver, }, Full, Seed, } -#[allow(dead_code)] +/// A channel for pulling events from the Ethereum oracle +/// and queueing them up for inclusion in vote extensions +#[derive(Debug)] +pub(super) struct EthereumReceiver { + channel: Receiver, + queue: BTreeSet, +} + +impl EthereumReceiver { + /// Create a new [`EthereumReceiver`] from a channel connected + /// to an Ethereum oracle + pub fn new(channel: Receiver) -> Self { + Self { + channel, + queue: BTreeSet::new(), + } + } + + /// Pull messages from the channel and add to queue + /// Since vote extensions require ordering of ethereum + /// events, we do that here. We also de-duplicate events + pub fn fill_queue(&mut self) { + let mut new_events = 0; + while let Ok(eth_event) = self.channel.try_recv() { + if self.queue.insert(eth_event) { + new_events += 1; + }; + } + if new_events > 0 { + tracing::info!(n = new_events, "received Ethereum events"); + } + } + + /// Get a copy of the queue + pub fn get_events(&self) -> Vec { + self.queue.iter().cloned().collect() + } + + /// Remove the given [`EthereumEvent`] from the queue, if present. + /// + /// **INVARIANT:** This method preserves the sorting and de-duplication + /// of events in the queue. + pub fn remove_event(&mut self, event: &EthereumEvent) { + self.queue.remove(event); + } +} + impl ShellMode { /// Get the validator address if ledger is in validator mode - pub fn get_validator_address(&self) -> Option<&address::Address> { + pub fn get_validator_address(&self) -> Option<&Address> { match &self { ShellMode::Validator { data, .. } => Some(&data.address), _ => None, } } + + /// Remove an Ethereum event from the internal queue + pub fn dequeue_eth_event(&mut self, event: &EthereumEvent) { + if let ShellMode::Validator { ethereum_recv, .. 
} = self { + ethereum_recv.remove_event(event); + } + } + + /// Get the protocol keypair for this validator. + pub fn get_protocol_key(&self) -> Option<&common::SecretKey> { + match self { + ShellMode::Validator { + data: + ValidatorData { + keys: + ValidatorKeys { + protocol_keypair, .. + }, + .. + }, + .. + } => Some(protocol_keypair), + _ => None, + } + } + + /// Get the Ethereum bridge keypair for this validator. + #[cfg_attr(not(test), allow(dead_code))] + pub fn get_eth_bridge_keypair(&self) -> Option<&common::SecretKey> { + match self { + ShellMode::Validator { + data: + ValidatorData { + keys: + ValidatorKeys { + eth_bridge_keypair, .. + }, + .. + }, + .. + } => Some(eth_bridge_keypair), + _ => None, + } + } + + /// If this node is a validator, broadcast a tx + /// to the mempool using the broadcaster subprocess + #[cfg_attr(feature = "abcipp", allow(dead_code))] + pub fn broadcast(&self, data: Vec) { + if let Self::Validator { + broadcast_sender, .. + } = self + { + broadcast_sender + .send(data) + .expect("The broadcaster should be running for a validator"); + } + } } #[derive(Clone, Debug)] @@ -195,9 +312,9 @@ where /// The persistent storage pub(super) storage: Storage, /// Gas meter for the current block - gas_meter: BlockGasMeter, + pub(super) gas_meter: BlockGasMeter, /// Write log for the current block - write_log: WriteLog, + pub(super) write_log: WriteLog, /// Byzantine validators given from ABCI++ `prepare_proposal` are stored in /// this field. They will be slashed when we finalize the block. byzantine_validators: Vec, @@ -205,14 +322,14 @@ where #[allow(dead_code)] base_dir: PathBuf, /// Path to the WASM directory for files used in the genesis block. - wasm_dir: PathBuf, + pub(super) wasm_dir: PathBuf, /// Information about the running shell instance #[allow(dead_code)] mode: ShellMode, /// VP WASM compilation cache - vp_wasm_cache: VpCache, + pub(super) vp_wasm_cache: VpCache, /// Tx WASM compilation cache - tx_wasm_cache: TxCache, + pub(super) tx_wasm_cache: TxCache, /// Taken from config `storage_read_past_height_limit`. When set, will /// limit the how many block heights in the past can the storage be /// queried for reading values. @@ -230,10 +347,12 @@ where { /// Create a new shell from a path to a database and a chain id. Looks /// up the database with this data and tries to load the last state. + #[allow(clippy::too_many_arguments)] pub fn new( config: config::Ledger, wasm_dir: PathBuf, broadcast_sender: UnboundedSender>, + eth_receiver: Option>, db_cache: Option<&D::Cache>, vp_wasm_compilation_cache: u64, tx_wasm_compilation_cache: u64, @@ -287,6 +406,9 @@ where .map(|data| ShellMode::Validator { data, broadcast_sender, + ethereum_recv: EthereumReceiver::new( + eth_receiver.unwrap(), + ), }) .expect( "Validator data should have been stored in the \ @@ -295,16 +417,21 @@ where } #[cfg(feature = "dev")] { - let validator_keys = wallet::defaults::validator_keys(); + let (protocol_keypair, eth_bridge_keypair, dkg_keypair) = + wallet::defaults::validator_keys(); ShellMode::Validator { data: wallet::ValidatorData { address: wallet::defaults::validator_address(), keys: wallet::ValidatorKeys { - protocol_keypair: validator_keys.0, - dkg_keypair: Some(validator_keys.1), + protocol_keypair, + eth_bridge_keypair, + dkg_keypair: Some(dkg_keypair), }, }, broadcast_sender, + ethereum_recv: EthereumReceiver::new( + eth_receiver.unwrap(), + ), } } } @@ -519,26 +646,6 @@ where } } - /// INVARIANT: This method must be stateless. 
- #[cfg(feature = "abcipp")] - pub fn extend_vote( - &self, - _req: request::ExtendVote, - ) -> response::ExtendVote { - Default::default() - } - - /// INVARIANT: This method must be stateless. - #[cfg(feature = "abcipp")] - pub fn verify_vote_extension( - &self, - _req: request::VerifyVoteExtension, - ) -> response::VerifyVoteExtension { - response::VerifyVoteExtension { - status: VerifyStatus::Accept as i32, - } - } - /// Commit a block. Persist the application state and return the Merkle root /// hash. pub fn commit(&mut self) -> response::Commit { @@ -562,65 +669,134 @@ where self.storage.last_height, ); response.data = root.0; + + #[cfg(not(feature = "abcipp"))] + { + use crate::node::ledger::shell::vote_extensions::iter_protocol_txs; + + if let ShellMode::Validator { .. } = &self.mode { + let ext = self.craft_extension(); + + let protocol_key = self + .mode + .get_protocol_key() + .expect("Validators should have protocol keys"); + + let protocol_txs = iter_protocol_txs(ext).map(|protocol_tx| { + protocol_tx.sign(protocol_key).to_bytes() + }); + + for tx in protocol_txs { + self.mode.broadcast(tx); + } + } + } + response } /// Validate a transaction request. On success, the transaction will /// included in the mempool and propagated to peers, otherwise it will be /// rejected. + // TODO: move this to another file after 0.11 merges, + // since this method has become fairly large at this point pub fn mempool_validate( &self, tx_bytes: &[u8], r#_type: MempoolTxType, ) -> response::CheckTx { + use namada::types::transaction::protocol::ProtocolTx; + #[cfg(not(feature = "abcipp"))] + use namada::types::transaction::protocol::ProtocolTxType; + let mut response = response::CheckTx::default(); - match Tx::try_from(tx_bytes).map_err(Error::TxDecoding) { - Ok(_) => response.log = String::from("Mempool validation passed"), - Err(msg) => { - response.code = 1; - response.log = msg.to_string(); - } - } - response - } - #[allow(dead_code)] - /// Simulate validation and application of a transaction. - fn dry_run_tx(&self, tx_bytes: &[u8]) -> response::Query { - let mut response = response::Query::default(); - let mut gas_meter = BlockGasMeter::default(); - let mut write_log = WriteLog::default(); - let mut vp_wasm_cache = self.vp_wasm_cache.read_only(); - let mut tx_wasm_cache = self.tx_wasm_cache.read_only(); - match Tx::try_from(tx_bytes) { + const VALID_MSG: &str = "Mempool validation passed"; + const INVALID_MSG: &str = "Mempool validation failed"; + + match Tx::try_from(tx_bytes).map_err(Error::TxDecoding) { Ok(tx) => { - let tx = TxType::Decrypted(DecryptedTx::Decrypted(tx)); - match protocol::apply_tx( - tx, - tx_bytes.len(), - TxIndex::default(), - &mut gas_meter, - &mut write_log, - &self.storage, - &mut vp_wasm_cache, - &mut tx_wasm_cache, - ) - .map_err(Error::TxApply) - { - Ok(result) => response.info = result.to_string(), - Err(error) => { + match process_tx(tx) { + #[cfg(not(feature = "abcipp"))] + Ok(TxType::Protocol(ProtocolTx { + tx: ProtocolTxType::EthEventsVext(ext), + .. + })) => { + if let Err(err) = self + .validate_eth_events_vext_and_get_it_back( + ext, + self.storage.last_height, + ) + { + response.code = 1; + response.log = format!( + "{INVALID_MSG}: Invalid Ethereum events vote \ + extension: {err}", + ); + } else { + response.log = String::from(VALID_MSG); + } + } + #[cfg(not(feature = "abcipp"))] + Ok(TxType::Protocol(ProtocolTx { + tx: ProtocolTxType::ValSetUpdateVext(ext), + .. 
+ })) => { + if let Err(err) = self + .validate_valset_upd_vext_and_get_it_back( + ext, + self.storage.last_height, + ) + { + response.code = 1; + response.log = format!( + "{INVALID_MSG}: Invalid validator set update \ + vote extension: {err}", + ); + } else { + response.log = String::from(VALID_MSG); + // validator set update votes should be decided + // as soon as possible + response.priority = i64::MAX; + } + } + Ok(TxType::Protocol(ProtocolTx { .. })) => { response.code = 1; - response.log = format!("{}", error); + response.log = format!( + "{INVALID_MSG}: The given protocol tx cannot be \ + added to the mempool" + ); + } + Ok(TxType::Wrapper(_)) => { + response.log = String::from(VALID_MSG); + } + Ok(TxType::Raw(_)) => { + response.code = 1; + response.log = format!( + "{INVALID_MSG}: Raw transactions cannot be \ + accepted into the mempool" + ); + } + Ok(TxType::Decrypted(_)) => { + response.code = 1; + response.log = format!( + "{INVALID_MSG}: Decrypted txs cannot be sent by \ + clients" + ); + } + Err(err) => { + response.code = 1; + response.log = format!("{INVALID_MSG}: {err}"); } } - response } - Err(err) => { + Err(msg) => { response.code = 1; - response.log = format!("{}", Error::TxDecoding(err)); - response + response.log = format!("{INVALID_MSG}: {msg}"); } } + + response } /// Lookup a validator's keypair for their established account from their @@ -659,6 +835,23 @@ where } } +impl<'a, D, H> From<&'a mut Shell> + for ShellParams<'a, D, H, namada::vm::WasmCacheRwAccess> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + fn from(shell: &'a mut Shell) -> Self { + Self { + block_gas_meter: &mut shell.gas_meter, + write_log: &mut shell.write_log, + storage: &shell.storage, + vp_wasm_cache: &mut shell.vp_wasm_cache, + tx_wasm_cache: &mut shell.tx_wasm_cache, + } + } +} + /// Helper functions and types for writing unit tests /// for the shell #[cfg(test)] @@ -668,14 +861,15 @@ mod test_utils { use namada::ledger::storage::mockdb::MockDB; use namada::ledger::storage::{BlockStateWrite, MerkleTree, Sha256Hasher}; - use namada::types::address::EstablishedAddressGen; + use namada::types::address::{self, EstablishedAddressGen}; use namada::types::chain::ChainId; use namada::types::hash::Hash; use namada::types::key::*; use namada::types::storage::{BlockHash, BlockResults, Epoch, Header}; + use namada::types::time::DateTimeUtc; use namada::types::transaction::Fee; use tempfile::tempdir; - use tokio::sync::mpsc::UnboundedReceiver; + use tokio::sync::mpsc::{Sender, UnboundedReceiver}; use super::*; use crate::facade::tendermint_proto::abci::{ @@ -686,6 +880,7 @@ mod test_utils { FinalizeBlock, ProcessedTx, }; use crate::node::ledger::storage::{PersistentDB, PersistentStorageHasher}; + use crate::node::ledger::ORACLE_CHANNEL_BUFFER_SIZE; #[derive(Error, Debug)] pub enum TestError { @@ -708,7 +903,13 @@ mod test_utils { } /// Generate a random public/private keypair + #[inline] pub(super) fn gen_keypair() -> common::SecretKey { + gen_ed25519_keypair() + } + + /// Generate a random ed25519 public/private keypair + pub(super) fn gen_ed25519_keypair() -> common::SecretKey { use rand::prelude::ThreadRng; use rand::thread_rng; @@ -716,6 +917,44 @@ mod test_utils { ed25519::SigScheme::generate(&mut rng).try_to_sk().unwrap() } + /// Generate a random secp256k1 public/private keypair + pub(super) fn gen_secp256k1_keypair() -> common::SecretKey { + use rand::prelude::ThreadRng; + use rand::thread_rng; + + let mut rng: ThreadRng = thread_rng(); + 
secp256k1::SigScheme::generate(&mut rng) + .try_to_sk() + .unwrap() + } + + /// Invalidate a valid signature `sig`. + pub(super) fn invalidate_signature( + sig: common::Signature, + ) -> common::Signature { + match sig { + common::Signature::Ed25519(ed25519::Signature(ref sig)) => { + let mut sig_bytes = sig.to_bytes(); + sig_bytes[0] = sig_bytes[0].wrapping_add(1); + common::Signature::Ed25519(ed25519::Signature(sig_bytes.into())) + } + common::Signature::Secp256k1(secp256k1::Signature( + ref sig, + ref recovery_id, + )) => { + let mut sig_bytes = sig.serialize(); + let recovery_id_bytes = recovery_id.serialize(); + sig_bytes[0] = sig_bytes[0].wrapping_add(1); + let bytes: [u8; 65] = + [sig_bytes.as_slice(), [recovery_id_bytes].as_slice()] + .concat() + .try_into() + .unwrap(); + common::Signature::Secp256k1((&bytes).try_into().unwrap()) + } + } + } + /// A wrapper around the shell that implements /// Drop so as to clean up the files that it /// generates. Also allows illegal state @@ -746,31 +985,45 @@ mod test_utils { } impl TestShell { - /// Returns a new shell paired with a broadcast receiver, which will - /// receives any protocol txs sent by the shell. - pub fn new() -> (Self, UnboundedReceiver>) { + /// Returns a new shell with + /// - A broadcast receiver, which will receive any protocol txs sent + /// by the shell. + /// - A sender that can send Ethereum events into the ledger, mocking + /// the Ethereum fullnode process + pub fn new_at_height>( + height: H, + ) -> (Self, UnboundedReceiver>, Sender) { let (sender, receiver) = tokio::sync::mpsc::unbounded_channel(); + let (eth_sender, eth_receiver) = + tokio::sync::mpsc::channel(ORACLE_CHANNEL_BUFFER_SIZE); let base_dir = tempdir().unwrap().as_ref().canonicalize().unwrap(); let vp_wasm_compilation_cache = 50 * 1024 * 1024; // 50 kiB let tx_wasm_compilation_cache = 50 * 1024 * 1024; // 50 kiB - ( - Self { - shell: Shell::::new( - config::Ledger::new( - base_dir, - Default::default(), - TendermintMode::Validator, - ), - top_level_directory().join("wasm"), - sender, - None, - vp_wasm_compilation_cache, - tx_wasm_compilation_cache, - address::nam(), - ), - }, - receiver, - ) + let mut shell = Shell::::new( + config::Ledger::new( + base_dir, + Default::default(), + TendermintMode::Validator, + ), + top_level_directory().join("wasm"), + sender, + Some(eth_receiver), + None, + vp_wasm_compilation_cache, + tx_wasm_compilation_cache, + address::nam(), + ); + shell.storage.last_height = height.into(); + (Self { shell }, receiver, eth_sender) + } + + /// Same as [`TestShell::new_at_height`], but returns a shell at block + /// height 0. + #[inline] + #[allow(dead_code)] + pub fn new() -> (Self, UnboundedReceiver>, Sender) + { + Self::new_at_height(BlockHeight(1)) } /// Forward a InitChain request and expect a success @@ -826,11 +1079,21 @@ mod test_utils { } } + /// Get the only validator's voting power. + #[inline] + #[cfg(not(feature = "abcipp"))] + pub fn get_validator_bonded_stake() -> namada::types::token::Amount { + 200_000_000_000.into() + } + /// Start a new test shell and initialize it. Returns the shell paired with /// a broadcast receiver, which will receives any protocol txs sent by the /// shell. 
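+    // Illustrative usage (not part of the original patch), mirroring the
+    // finalize_block tests above: `setup()` hands back a validator-mode
+    // shell, a broadcast receiver, and a mock oracle channel:
+    //
+    //     let (mut shell, mut broadcast_rx, eth_sender) = setup();
+    //     tokio_test::block_on(eth_sender.send(event)).expect("Test failed");
+    //     assert!(!shell.new_ethereum_events().is_empty());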
- pub(super) fn setup() -> (TestShell, UnboundedReceiver>) { - let (mut test, receiver) = TestShell::new(); + pub(super) fn setup_at_height>( + height: H, + ) -> (TestShell, UnboundedReceiver>, Sender) { + let (mut test, receiver, eth_receiver) = + TestShell::new_at_height(height); test.init_chain(RequestInitChain { time: Some(Timestamp { seconds: 0, @@ -839,7 +1102,14 @@ mod test_utils { chain_id: ChainId::default().to_string(), ..Default::default() }); - (test, receiver) + (test, receiver, eth_receiver) + } + + /// Same as [`setup`], but returns a shell at block height 0. + #[inline] + pub(super) fn setup() + -> (TestShell, UnboundedReceiver>, Sender) { + setup_at_height(BlockHeight(0)) } /// This is just to be used in testing. It is not @@ -866,6 +1136,8 @@ mod test_utils { let base_dir = tempdir().unwrap().as_ref().canonicalize().unwrap(); // we have to use RocksDB for this test let (sender, _) = tokio::sync::mpsc::unbounded_channel(); + let (_, receiver) = + tokio::sync::mpsc::channel(ORACLE_CHANNEL_BUFFER_SIZE); let vp_wasm_compilation_cache = 50 * 1024 * 1024; // 50 kiB let tx_wasm_compilation_cache = 50 * 1024 * 1024; // 50 kiB let native_token = address::nam(); @@ -877,6 +1149,7 @@ mod test_utils { ), top_level_directory().join("wasm"), sender.clone(), + Some(receiver), None, vp_wasm_compilation_cache, tx_wasm_compilation_cache, @@ -927,7 +1200,8 @@ mod test_utils { // Drop the shell std::mem::drop(shell); - + let (_, receiver) = + tokio::sync::mpsc::channel(ORACLE_CHANNEL_BUFFER_SIZE); // Reboot the shell and check that the queue was restored from DB let shell = Shell::::new( config::Ledger::new( @@ -937,6 +1211,7 @@ mod test_utils { ), top_level_directory().join("wasm"), sender, + Some(receiver), None, vp_wasm_compilation_cache, tx_wasm_compilation_cache, diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 9338ecc482..927ad4b9df 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -1,17 +1,39 @@ //! 
Implementation of the [`RequestPrepareProposal`] ABCI++ method for the Shell -use namada::ledger::storage::{DBIter, StorageHasher, DB}; +#[cfg(not(feature = "abcipp"))] +use index_set::vec::VecIndexSet; +use namada::core::hints; +use namada::ledger::pos::PosQueries; +#[cfg(feature = "abcipp")] +use namada::ledger::pos::SendValsetUpd; +use namada::ledger::storage::traits::StorageHasher; +use namada::ledger::storage::{DBIter, DB}; use namada::proto::Tx; +use namada::types::storage::BlockHeight; use namada::types::transaction::tx_types::TxType; use namada::types::transaction::wrapper::wrapper_tx::PairingEngine; use namada::types::transaction::{AffineCurve, DecryptedTx, EllipticCurve}; +#[cfg(feature = "abcipp")] +use namada::types::vote_extensions::VoteExtensionDigest; use super::super::*; +use super::block_space_alloc::states::{ + BuildingDecryptedTxBatch, BuildingProtocolTxBatch, + EncryptedTxBatchAllocator, FillingRemainingSpace, NextState, + NextStateWithEncryptedTxs, NextStateWithoutEncryptedTxs, TryAlloc, +}; +use super::block_space_alloc::{AllocFailure, BlockSpaceAllocator}; +#[cfg(feature = "abcipp")] +use crate::facade::tendermint_proto::abci::ExtendedCommitInfo; use crate::facade::tendermint_proto::abci::RequestPrepareProposal; +#[cfg(not(feature = "abcipp"))] +use crate::node::ledger::shell::vote_extensions::deserialize_vote_extensions; #[cfg(feature = "abcipp")] -use crate::facade::tendermint_proto::abci::{tx_record::TxAction, TxRecord}; +use crate::node::ledger::shell::vote_extensions::iter_protocol_txs; +#[cfg(feature = "abcipp")] +use crate::node::ledger::shell::vote_extensions::split_vote_extensions; use crate::node::ledger::shell::{process_tx, ShellMode}; -use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes; +use crate::node::ledger::shims::abcipp_shim_types::shim::{response, TxBytes}; impl Shell where @@ -20,162 +42,1057 @@ where { /// Begin a new block. /// - /// We include half of the new wrapper txs given to us from the mempool - /// by tendermint. The rest of the block is filled with decryptions - /// of the wrapper txs from the previously committed block. + /// Block construction is documented in [`block_space_alloc`] + /// and [`block_space_alloc::states`]. /// /// INVARIANT: Any changes applied in this method must be reverted if /// the proposal is rejected (unless we can simply overwrite /// them in the next block). pub fn prepare_proposal( - &self, + &mut self, req: RequestPrepareProposal, ) -> response::PrepareProposal { + // We can safely reset meter, because if the block is rejected, + // we'll reset again on the next proposal, until the + // proposal is accepted + self.gas_meter.reset(); let txs = if let ShellMode::Validator { .. 
} = self.mode { - // TODO: This should not be hardcoded - let privkey = ::G2Affine::prime_subgroup_generator(); - - // TODO: Craft the Ethereum state update tx - // filter in half of the new txs from Tendermint, only keeping - // wrappers - let number_of_new_txs = 1 + req.txs.len() / 2; - #[cfg(feature = "abcipp")] - let mut txs: Vec = req - .txs - .into_iter() - .take(number_of_new_txs) - .map(|tx_bytes| { - if let Ok(Ok(TxType::Wrapper(_))) = - Tx::try_from(tx_bytes.as_slice()).map(process_tx) - { - record::keep(tx_bytes) - } else { - record::remove(tx_bytes) - } - }) - .collect(); + // start counting allotted space for txs + let alloc = BlockSpaceAllocator::from(&self.storage); #[cfg(not(feature = "abcipp"))] - let mut txs: Vec = req - .txs - .into_iter() - .take(number_of_new_txs) - .filter_map(|tx_bytes| { - if let Ok(Ok(TxType::Wrapper(_))) = - Tx::try_from(tx_bytes.as_slice()).map(process_tx) - { - Some(tx_bytes) - } else { - None - } - }) - .collect(); + let mut protocol_tx_indices = VecIndexSet::default(); // decrypt the wrapper txs included in the previous block - let decrypted_txs = self.storage.tx_queue.iter().map(|tx| { + let (decrypted_txs, alloc) = self.build_decrypted_txs(alloc); + let mut txs = decrypted_txs; + + // add vote extension protocol txs + let (mut protocol_txs, alloc) = self.build_protocol_txs( + alloc, + #[cfg(not(feature = "abcipp"))] + &mut protocol_tx_indices, + #[cfg(feature = "abcipp")] + req.local_last_commit, + #[cfg(not(feature = "abcipp"))] + &req.txs, + ); + txs.append(&mut protocol_txs); + + // add encrypted txs + let (mut encrypted_txs, alloc) = + self.build_encrypted_txs(alloc, &req.txs); + txs.append(&mut encrypted_txs); + + // fill up the remaining block space with + // protocol transactions that haven't been + // selected for inclusion yet, and whose + // size allows them to fit in the free + // space left + let mut remaining_txs = self.build_remaining_batch( + alloc, + #[cfg(not(feature = "abcipp"))] + &protocol_tx_indices, + req.txs, + ); + txs.append(&mut remaining_txs); + + txs + } else { + vec![] + }; + + tracing::info!( + height = req.height, + num_of_txs = txs.len(), + "Proposing block" + ); + + response::PrepareProposal { txs } + } + + /// Builds a batch of DKG decrypted transactions. 
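+    // Illustrative note (not part of the original patch): each batch builder
+    // below reacts to `AllocFailure` the same way inside its `take_while`,
+    // roughly:
+    //
+    //     match alloc.try_alloc(&tx_bytes[..]) {
+    //         Ok(()) => true,                    // tx fits: keep it
+    //         Err(AllocFailure::Rejected { .. }) => false,
+    //                                            // bin full: cut the batch short
+    //         Err(AllocFailure::OverflowsBin { .. }) => true,
+    //                                            // oversized: warn, keep iterating
+    //     }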
+ // NOTE: we won't have frontrunning protection until V2 of the + // Anoma protocol; Namada runs V1, therefore this method is + // essentially a NOOP + // + // sources: + // - https://specs.namada.net/main/releases/v2.html + // - https://github.com/anoma/ferveo + fn build_decrypted_txs( + &mut self, + mut alloc: BlockSpaceAllocator, + ) -> (Vec, BlockSpaceAllocator) { + // TODO: This should not be hardcoded + let privkey = + ::G2Affine::prime_subgroup_generator(); + + let txs = self + .storage + .tx_queue + .iter() + .map(|tx| { Tx::from(match tx.decrypt(privkey) { Ok(tx) => DecryptedTx::Decrypted(tx), _ => DecryptedTx::Undecryptable(tx.clone()), }) .to_bytes() - }); - #[cfg(feature = "abcipp")] - let mut decrypted_txs: Vec<_> = - decrypted_txs.map(record::add).collect(); - #[cfg(not(feature = "abcipp"))] - let mut decrypted_txs: Vec<_> = decrypted_txs.collect(); + }) + // TODO: make sure all decrypted txs are accepted + .take_while(|tx_bytes| { + alloc.try_alloc(&tx_bytes[..]).map_or_else( + |status| match status { + AllocFailure::Rejected { bin_space_left } => { + tracing::warn!( + ?tx_bytes, + bin_space_left, + proposal_height = + ?self.storage.get_current_decision_height(), + "Dropping decrypted tx from the current proposal", + ); + false + } + AllocFailure::OverflowsBin { bin_size } => { + tracing::warn!( + ?tx_bytes, + bin_size, + proposal_height = + ?self.storage.get_current_decision_height(), + "Dropping large decrypted tx from the current proposal", + ); + true + } + }, + |()| true, + ) + }) + .collect(); + let alloc = alloc.next_state(); - txs.append(&mut decrypted_txs); - txs + (txs, alloc) + } + + /// Builds a batch of vote extension transactions, comprised of Ethereum + /// events and, optionally, a validator set update. + #[cfg(feature = "abcipp")] + fn build_protocol_txs( + &mut self, + alloc: BlockSpaceAllocator, + local_last_commit: Option, + ) -> (Vec, EncryptedTxBatchAllocator) { + // genesis should not contain vote extensions + if self.storage.last_height == BlockHeight(0) { + return (vec![], self.get_encrypted_txs_allocator(alloc)); + } + + let (eth_events, valset_upds) = split_vote_extensions( + local_last_commit + .expect( + "Honest Namada validators will always sign \ + ethereum_events::Vext instances, even if no Ethereum \ + events were observed at a given block height. In fact, a \ + quorum of signed empty ethereum_events::Vext instances \ + commits the fact no events were observed by a majority \ + of validators. 
Therefore, for block heights greater than \ + zero, we should always have vote extensions.", + ) + .votes, + ); + + let ethereum_events = self + .compress_ethereum_events(eth_events) + .unwrap_or_else(|| panic!("{}", not_enough_voting_power_msg())); + + let validator_set_update = + if self + .storage + .can_send_validator_set_update(SendValsetUpd::AtPrevHeight) + { + Some(self.compress_valset_updates(valset_upds).unwrap_or_else( + || panic!("{}", not_enough_voting_power_msg()), + )) + } else { + None + }; + + let protocol_key = self + .mode + .get_protocol_key() + .expect("Validators should always have a protocol key"); + + let txs: Vec<_> = iter_protocol_txs(VoteExtensionDigest { + ethereum_events, + validator_set_update, + }) + .map(|tx| tx.sign(protocol_key).to_bytes()) + .collect(); + + // TODO(feature = "abcipp"): + // - alloc space for each protocol tx + // - handle space allocation errors + // - transition to new allocator state + + (txs, self.get_encrypted_txs_allocator(alloc)) + } + + /// Builds a batch of vote extension transactions, comprised of Ethereum + /// events and, optionally, a validator set update + #[cfg(not(feature = "abcipp"))] + fn build_protocol_txs( + &mut self, + mut alloc: BlockSpaceAllocator, + protocol_tx_indices: &mut VecIndexSet, + txs: &[TxBytes], + ) -> (Vec, EncryptedTxBatchAllocator) { + if self.storage.last_height == BlockHeight(0) { + // genesis should not contain vote extensions + return (vec![], self.get_encrypted_txs_allocator(alloc)); + } + + let txs = deserialize_vote_extensions(txs, protocol_tx_indices).take_while(|tx_bytes| + alloc.try_alloc(&tx_bytes[..]) + .map_or_else( + |status| match status { + AllocFailure::Rejected { bin_space_left } => { + // TODO: maybe we should find a way to include + // validator set updates all the time. for instance, + // we could have recursive bins -> bin space within + // a bin is partitioned into yet more bins. so, we + // could have, say, 2/3 of the bin space available + // for eth events, and 1/3 available for valset + // upds. to be determined, as we implement CheckTx + // changes (issue #367) + tracing::debug!( + ?tx_bytes, + bin_space_left, + proposal_height = + ?self.storage.get_current_decision_height(), + "Dropping protocol tx from the current proposal", + ); + false + } + AllocFailure::OverflowsBin { bin_size } => { + // TODO: handle tx whose size is greater + // than bin size + tracing::warn!( + ?tx_bytes, + bin_size, + proposal_height = + ?self.storage.get_current_decision_height(), + "Dropping large protocol tx from the current proposal", + ); + true + } + }, + |()| true, + ) + ) + .collect(); + + (txs, self.get_encrypted_txs_allocator(alloc)) + } + + /// Depending on the current block height offset within the epoch, + /// transition state accordingly, from a protocol tx batch allocator + /// to an encrypted tx batch allocator. + /// + /// # How to determine which path to take in the states DAG + /// + /// If we are at the second or third block height offset within an + /// epoch, we do not allow encrypted transactions to be included in + /// a block, therefore we return an allocator wrapped in an + /// [`EncryptedTxBatchAllocator::WithoutEncryptedTxs`] value. + /// Otherwise, we return an allocator wrapped in an + /// [`EncryptedTxBatchAllocator::WithEncryptedTxs`] value. 
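The doc comment above describes a height-offset rule that the function defined next implements. As a self-contained sketch of just that decision, with the offset as a plain parameter (the shell derives it via `is_deciding_offset_within_epoch`):

```rust
/// Whether encrypted (wrapper) txs may be included at the given
/// height offset within an epoch. Offsets 1 and 2 (the second and
/// third heights of the epoch) are excluded, matching the two
/// `is_deciding_offset_within_epoch` probes in the diff.
fn encrypted_txs_allowed(height_offset_within_epoch: u64) -> bool {
    let is_2nd_height_off = height_offset_within_epoch == 1;
    let is_3rd_height_off = height_offset_within_epoch == 2;
    !(is_2nd_height_off || is_3rd_height_off)
}

fn main() {
    assert!(encrypted_txs_allowed(0)); // first height of an epoch
    assert!(!encrypted_txs_allowed(1)); // second height: no wrapper txs
    assert!(!encrypted_txs_allowed(2)); // third height: no wrapper txs
    assert!(encrypted_txs_allowed(3)); // remainder of the epoch
}
```

The same predicate reappears as `encrypted_txs_not_allowed` on the ProcessProposal side of this diff, which is what keeps the proposer and the verifying validators in agreement.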
+ #[inline] + fn get_encrypted_txs_allocator( + &self, + alloc: BlockSpaceAllocator, + ) -> EncryptedTxBatchAllocator { + let is_2nd_height_off = self.storage.is_deciding_offset_within_epoch(1); + let is_3rd_height_off = self.storage.is_deciding_offset_within_epoch(2); + + if hints::unlikely(is_2nd_height_off || is_3rd_height_off) { + tracing::warn!( + proposal_height = + ?self.storage.get_current_decision_height(), + "No mempool txs are being included in the current proposal" + ); + EncryptedTxBatchAllocator::WithoutEncryptedTxs( + alloc.next_state_without_encrypted_txs(), + ) } else { - vec![] - }; + EncryptedTxBatchAllocator::WithEncryptedTxs( + alloc.next_state_with_encrypted_txs(), + ) + } + } - #[cfg(feature = "abcipp")] - { - response::PrepareProposal { - tx_records: txs, - ..Default::default() - } + /// Builds a batch of encrypted transactions, retrieved from + /// Tendermint's mempool. + fn build_encrypted_txs( + &mut self, + mut alloc: EncryptedTxBatchAllocator, + txs: &[TxBytes], + ) -> (Vec, BlockSpaceAllocator) { + let txs = txs + .iter() + .filter_map(|tx_bytes| { + if let Ok(Ok(TxType::Wrapper(_))) = + Tx::try_from(tx_bytes.as_slice()).map(process_tx) + { + Some(tx_bytes.clone()) + } else { + None + } + }) + .take_while(|tx_bytes| { + alloc.try_alloc(&tx_bytes[..]) + .map_or_else( + |status| match status { + AllocFailure::Rejected { bin_space_left } => { + tracing::debug!( + ?tx_bytes, + bin_space_left, + proposal_height = + ?self.storage.get_current_decision_height(), + "Dropping encrypted tx from the current proposal", + ); + false + } + AllocFailure::OverflowsBin { bin_size } => { + // TODO: handle tx whose size is greater + // than bin size + tracing::warn!( + ?tx_bytes, + bin_size, + proposal_height = + ?self.storage.get_current_decision_height(), + "Dropping large encrypted tx from the current proposal", + ); + true + } + }, + |()| true, + ) + }) + .collect(); + let alloc = alloc.next_state(); + + (txs, alloc) + } + + /// Builds a batch of transactions that can fit in the + /// remaining space of the [`BlockSpaceAllocator`]. + #[cfg(feature = "abcipp")] + fn build_remaining_batch( + &mut self, + _alloc: BlockSpaceAllocator, + _txs: Vec, + ) -> Vec { + vec![] + } + + /// Builds a batch of transactions that can fit in the + /// remaining space of the [`BlockSpaceAllocator`]. + #[cfg(not(feature = "abcipp"))] + fn build_remaining_batch( + &mut self, + mut alloc: BlockSpaceAllocator, + protocol_tx_indices: &VecIndexSet, + txs: Vec, + ) -> Vec { + get_remaining_protocol_txs(protocol_tx_indices, txs) + .take_while(|tx_bytes| { + alloc.try_alloc(&tx_bytes[..]).map_or_else( + |status| match status { + AllocFailure::Rejected { bin_space_left } => { + tracing::debug!( + ?tx_bytes, + bin_space_left, + proposal_height = + ?self.storage.get_current_decision_height(), + "Dropping tx from the current proposal", + ); + false + } + AllocFailure::OverflowsBin { bin_size } => { + // TODO: handle tx whose size is greater + // than bin size + tracing::warn!( + ?tx_bytes, + bin_size, + proposal_height = + ?self.storage.get_current_decision_height(), + "Dropping large tx from the current proposal", + ); + true + } + }, + |()| true, + ) + }) + .collect() + } +} + +/// Return a list of the protocol transactions that haven't +/// been marked for inclusion in the block, yet. 
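One control-flow subtlety is shared by all the batch builders above: `Iterator::take_while` yields every element for which the predicate returns `true` and terminates at the first `false`; unlike `filter`, it cannot skip one element and continue. Hence the `AllocFailure::Rejected` arms return `false`, treating a full bin as a stop signal for the whole batch, while the `AllocFailure::OverflowsBin` arms return `true` so the scan continues, leaving proper handling of the oversized tx itself to the diff's TODOs. A self-contained sketch of just that behavior, with the per-tx allocation outcomes pre-baked for illustration:

```rust
/// Hypothetical allocation outcome, following the diff
/// (payload fields omitted for brevity).
#[derive(Clone, Copy)]
enum AllocFailure {
    Rejected,
    OverflowsBin,
}

fn main() {
    // Simulated per-tx allocation results for five txs.
    let outcomes: [Result<(), AllocFailure>; 5] = [
        Ok(()),
        Err(AllocFailure::OverflowsBin), // oversized tx
        Ok(()),
        Err(AllocFailure::Rejected), // bin full from here on
        Ok(()),
    ];

    let kept: Vec<usize> = outcomes
        .iter()
        .enumerate()
        .take_while(|(_, res)| {
            res.map_or_else(
                |failure| match failure {
                    // bin is full: halt the scan, dropping the rest
                    AllocFailure::Rejected => false,
                    // oversized tx: the predicate is `true`, so the
                    // scan keeps going past it
                    AllocFailure::OverflowsBin => true,
                },
                |()| true,
            )
        })
        .map(|(i, _)| i)
        .collect();

    // txs 0..=2 survive the scan; tx 3 halts it; tx 4 is never reached
    assert_eq!(kept, vec![0, 1, 2]);
}
```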
+#[cfg(not(feature = "abcipp"))] +fn get_remaining_protocol_txs( + protocol_tx_indices: &VecIndexSet, + txs: Vec, +) -> impl Iterator + '_ { + let mut skip_list = protocol_tx_indices.iter(); + let mut skip = skip_list.next(); + + txs.into_iter().enumerate().filter_map(move |(index, tx)| { + // this works bc/ tx indices are ordered + // in ascending order + if hints::likely(Some(index) == skip) { + skip = skip_list.next(); + return None; } - #[cfg(not(feature = "abcipp"))] + if let Ok(Ok(TxType::Protocol(_))) = + Tx::try_from(&tx[..]).map(process_tx) { - response::PrepareProposal { txs } + return Some(tx); } - } + None + }) } -/// Functions for creating the appropriate TxRecord given the -/// numeric code +/// Returns a suitable message to be displayed when Tendermint +/// somehow decides on a block containing vote extensions +/// reflecting `<= 2/3` of the total stake. #[cfg(feature = "abcipp")] -pub(super) mod record { +const fn not_enough_voting_power_msg() -> &'static str { + "A Tendermint quorum should never decide on a block including vote \ + extensions reflecting less than or equal to 2/3 of the total stake." +} + +#[cfg(test)] +// TODO: write tests for validator set update vote extensions in +// prepare proposals +mod test_prepare_proposal { + #[cfg(feature = "abcipp")] + use std::collections::{BTreeSet, HashMap}; + + use borsh::{BorshDeserialize, BorshSerialize}; + use namada::ledger::pos::namada_proof_of_stake::types::WeightedValidator; + use namada::ledger::pos::namada_proof_of_stake::PosBase; + use namada::ledger::pos::PosQueries; + use namada::proto::{Signed, SignedTxData}; + use namada::types::ethereum_events::EthereumEvent; + #[cfg(feature = "abcipp")] + use namada::types::key::common; + use namada::types::key::RefTo; + use namada::types::storage::{BlockHeight, Epoch}; + use namada::types::transaction::protocol::ProtocolTxType; + use namada::types::transaction::{Fee, TxType, WrapperTx}; + use namada::types::vote_extensions::ethereum_events; + #[cfg(feature = "abcipp")] + use namada::types::vote_extensions::VoteExtension; + use super::*; + #[cfg(feature = "abcipp")] + use crate::facade::tendermint_proto::abci::{ + ExtendedCommitInfo, ExtendedVoteInfo, + }; + use crate::node::ledger::shell::test_utils::{ + self, gen_keypair, TestShell, + }; + use crate::node::ledger::shims::abcipp_shim_types::shim::request::FinalizeBlock; + use crate::wallet; - /// Keep this transaction in the proposal - pub fn keep(tx: TxBytes) -> TxRecord { - TxRecord { - action: TxAction::Unmodified as i32, - tx, + /// Extract an [`ethereum_events::SignedVext`], from a set of + /// serialized [`TxBytes`]. + #[cfg(not(feature = "abcipp"))] + fn extract_eth_events_vext( + tx_bytes: TxBytes, + ) -> ethereum_events::SignedVext { + let got = Tx::try_from(&tx_bytes[..]).unwrap(); + let got_signed_tx = + SignedTxData::try_from_slice(&got.data.unwrap()[..]).unwrap(); + let protocol_tx = + TxType::try_from_slice(&got_signed_tx.data.unwrap()[..]).unwrap(); + let protocol_tx = match protocol_tx { + TxType::Protocol(protocol_tx) => protocol_tx.tx, + _ => panic!("Test failed"), + }; + match protocol_tx { + ProtocolTxType::EthEventsVext(ext) => ext, + _ => panic!("Test failed"), } } - /// A transaction added to the proposal not provided by - /// Tendermint from the mempool - pub fn add(tx: TxBytes) -> TxRecord { - TxRecord { - action: TxAction::Added as i32, - tx, + /// Test if [`get_remaining_protocol_txs`] is working as expected. 
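`get_remaining_protocol_txs` above merges two ordered sequences in a single pass: the ascending tx indices from `enumerate`, and the ascending indices recorded in the `VecIndexSet`. Stripped of the protocol-tx re-check, that skip logic looks like the following sketch; the function and parameter names are hypothetical.

```rust
/// Return the elements of `items` whose indices are NOT in
/// `skip_indices`, in one forward pass. `skip_indices` must be
/// sorted in ascending order, which is what makes the single
/// cursor (`skip`) sufficient -- the same invariant the diff's
/// `VecIndexSet` iterator provides.
fn remaining<T>(skip_indices: &[usize], items: Vec<T>) -> Vec<T> {
    let mut skip_list = skip_indices.iter().copied();
    let mut skip = skip_list.next();

    items
        .into_iter()
        .enumerate()
        .filter_map(move |(index, item)| {
            if Some(index) == skip {
                // this index was already marked for inclusion
                // elsewhere; advance the cursor and drop it here
                skip = skip_list.next();
                None
            } else {
                Some(item)
            }
        })
        .collect()
}

fn main() {
    let txs = vec!["a", "b", "c", "d", "e"];
    assert_eq!(remaining(&[0, 2, 3], txs), vec!["b", "e"]);
}
```

The single cursor is only correct because the indices are iterated in ascending order, which is the invariant the `// this works bc/ tx indices are ordered` comment in the diff alludes to.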
+ #[test] + #[cfg(not(feature = "abcipp"))] + fn test_get_remaining_protocol_txs() { + // TODO(feature = "abcipp"): use a different tx type here + fn bertha_ext(at_height: u64) -> TxBytes { + let key = wallet::defaults::bertha_keypair(); + let ext = ethereum_events::Vext::empty( + at_height.into(), + wallet::defaults::bertha_address(), + ) + .sign(&key); + ProtocolTxType::EthEventsVext(ext).sign(&key).to_bytes() } + + let excluded_indices = [0, 1, 3, 5, 7]; + let all_txs: Vec<_> = (0..10).map(bertha_ext).collect(); + let expected_txs: Vec<_> = [2, 4, 6, 8, 9] + .into_iter() + .map(bertha_ext) + .map(extract_eth_events_vext) + .collect(); + + let set = { + let mut s = VecIndexSet::default(); + for idx in excluded_indices.iter().copied() { + s.insert(idx as usize); + } + s + }; + + let got_txs: Vec<_> = get_remaining_protocol_txs(&set, all_txs) + .map(extract_eth_events_vext) + .collect(); + assert_eq!(expected_txs, got_txs); } - /// Remove this transaction from the set provided - /// by Tendermint from the mempool - pub fn remove(tx: TxBytes) -> TxRecord { - TxRecord { - action: TxAction::Removed as i32, - tx, + #[cfg(feature = "abcipp")] + fn get_local_last_commit(shell: &TestShell) -> Option { + let evts = { + let validator_addr = shell + .mode + .get_validator_address() + .expect("Test failed") + .to_owned(); + + let prev_height = shell.storage.last_height; + + let ext = ethereum_events::Vext::empty(prev_height, validator_addr); + + let protocol_key = match &shell.mode { + ShellMode::Validator { data, .. } => { + &data.keys.protocol_keypair + } + _ => panic!("Test failed"), + }; + + ext.sign(protocol_key) + }; + + let vote_extension = VoteExtension { + ethereum_events: evts, + validator_set_update: None, } - } -} + .try_to_vec() + .expect("Test failed"); -#[cfg(test)] -mod test_prepare_proposal { - use borsh::BorshSerialize; - use namada::types::storage::Epoch; - use namada::types::transaction::{Fee, WrapperTx}; + let vote = ExtendedVoteInfo { + vote_extension, + ..Default::default() + }; - use super::*; - use crate::node::ledger::shell::test_utils::{gen_keypair, TestShell}; + Some(ExtendedCommitInfo { + votes: vec![vote], + ..Default::default() + }) + } /// Test that if a tx from the mempool is not a /// WrapperTx type, it is not included in the /// proposed block. + // TODO: remove this test after CheckTx implements + // filtering of invalid txs; otherwise, we would have + // needed to return invalid txs from PrepareProposal, + // for these to get removed from a node's mempool. + // not returning invalid txs from PrepareProposal is + // a DoS vector, because the mempool will slowly fill + // up with garbage. 
luckily, Tendermint implements a + // mempool eviction policy, but honest client's txs + // may get lost in the process #[test] fn test_prepare_proposal_rejects_non_wrapper_tx() { - let (shell, _) = TestShell::new(); - let tx = Tx::new( + let (mut shell, _recv, _) = test_utils::setup_at_height(3u64); + let non_wrapper_tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction_data".as_bytes().to_owned()), ); let req = RequestPrepareProposal { - txs: vec![tx.to_bytes()], - max_tx_bytes: 0, + #[cfg(feature = "abcipp")] + local_last_commit: get_local_last_commit(&shell), + txs: vec![non_wrapper_tx.to_bytes()], ..Default::default() }; #[cfg(feature = "abcipp")] + assert_eq!(shell.prepare_proposal(req).txs.len(), 1); + #[cfg(not(feature = "abcipp"))] + assert_eq!(shell.prepare_proposal(req).txs.len(), 0); + } + + /// Check if we are filtering out an invalid vote extension `vext` + fn check_eth_events_filtering( + shell: &mut TestShell, + vext: Signed, + ) { + let filtered_votes: Vec<_> = + shell.filter_invalid_eth_events_vexts(vec![vext]).collect(); + + assert_eq!(filtered_votes, vec![]); + } + + /// Test if we are filtering out Ethereum events with bad + /// signatures in a prepare proposal. + #[test] + fn test_prepare_proposal_filter_out_bad_vext_signatures() { + const LAST_HEIGHT: BlockHeight = BlockHeight(2); + + let (mut shell, _recv, _) = test_utils::setup(); + + // artificially change the block height + shell.storage.last_height = LAST_HEIGHT; + + let signed_vote_extension = { + let (protocol_key, _, _) = wallet::defaults::validator_keys(); + let validator_addr = wallet::defaults::validator_address(); + + // generate a valid signature + let mut ext = ethereum_events::Vext { + validator_addr, + block_height: LAST_HEIGHT, + ethereum_events: vec![], + } + .sign(&protocol_key); + assert!(ext.verify(&protocol_key.ref_to()).is_ok()); + + // modify this signature such that it becomes invalid + ext.sig = test_utils::invalidate_signature(ext.sig); + ext + }; + + check_eth_events_filtering(&mut shell, signed_vote_extension); + } + + /// Test if we are filtering out Ethereum events seen at + /// block heights different than the last height. + #[test] + fn test_prepare_proposal_filter_out_bad_vext_bheights() { + const LAST_HEIGHT: BlockHeight = BlockHeight(3); + const PRED_LAST_HEIGHT: BlockHeight = BlockHeight(LAST_HEIGHT.0 - 1); + + let (mut shell, _recv, _) = test_utils::setup(); + + // artificially change the block height + shell.storage.last_height = LAST_HEIGHT; + + let (protocol_key, _, _) = wallet::defaults::validator_keys(); + let validator_addr = wallet::defaults::validator_address(); + + let signed_vote_extension = { + let ext = ethereum_events::Vext { + validator_addr, + block_height: PRED_LAST_HEIGHT, + ethereum_events: vec![], + } + .sign(&protocol_key); + assert!(ext.verify(&protocol_key.ref_to()).is_ok()); + ext + }; + + #[cfg(feature = "abcipp")] + check_eth_events_filtering(&mut shell, signed_vote_extension); + + #[cfg(not(feature = "abcipp"))] + { + let filtered_votes: Vec<_> = shell + .filter_invalid_eth_events_vexts(vec![ + signed_vote_extension.clone(), + ]) + .collect(); + assert_eq!( + filtered_votes, + vec![( + test_utils::get_validator_bonded_stake(), + signed_vote_extension + )] + ) + } + } + + /// Test if we are filtering out Ethereum events seen by + /// non-validator nodes. 
+ #[test] + fn test_prepare_proposal_filter_out_bad_vext_validators() { + const LAST_HEIGHT: BlockHeight = BlockHeight(2); + + let (mut shell, _recv, _) = test_utils::setup(); + + // artificially change the block height + shell.storage.last_height = LAST_HEIGHT; + + let (validator_addr, protocol_key) = { + let bertha_key = wallet::defaults::bertha_keypair(); + let bertha_addr = wallet::defaults::bertha_address(); + (bertha_addr, bertha_key) + }; + + let signed_vote_extension = { + let ext = ethereum_events::Vext { + validator_addr, + block_height: LAST_HEIGHT, + ethereum_events: vec![], + } + .sign(&protocol_key); + assert!(ext.verify(&protocol_key.ref_to()).is_ok()); + ext + }; + + check_eth_events_filtering(&mut shell, signed_vote_extension); + } + + /// Test if we are filtering out duped Ethereum events in + /// prepare proposals. + #[test] + fn test_prepare_proposal_filter_duped_ethereum_events() { + const LAST_HEIGHT: BlockHeight = BlockHeight(3); + + let (mut shell, _recv, _) = test_utils::setup(); + + // artificially change the block height + shell.storage.last_height = LAST_HEIGHT; + + let (protocol_key, _, _) = wallet::defaults::validator_keys(); + let validator_addr = wallet::defaults::validator_address(); + + let ethereum_event = EthereumEvent::TransfersToNamada { + nonce: 1u64.into(), + transfers: vec![], + }; + let signed_vote_extension = { + let ev = ethereum_event; + let ext = ethereum_events::Vext { + validator_addr, + block_height: LAST_HEIGHT, + ethereum_events: vec![ev.clone(), ev.clone(), ev], + } + .sign(&protocol_key); + assert!(ext.verify(&protocol_key.ref_to()).is_ok()); + ext + }; + + let maybe_digest = + shell.compress_ethereum_events(vec![signed_vote_extension]); + + #[cfg(feature = "abcipp")] + { + // we should be filtering out the vote extension with + // duped ethereum events; therefore, no valid vote + // extensions will remain, and we will get no + // digest from compressing nil vote extensions + assert!(maybe_digest.is_none()); + } + + #[cfg(not(feature = "abcipp"))] + { + use assert_matches::assert_matches; + + assert_matches!(maybe_digest, Some(d) if d.signatures.is_empty()); + } + } + + /// Creates an Ethereum events digest manually. + #[cfg(feature = "abcipp")] + fn manually_assemble_digest( + _protocol_key: &common::SecretKey, + ext: Signed, + last_height: BlockHeight, + ) -> ethereum_events::VextDigest { + use namada::types::vote_extensions::ethereum_events::MultiSignedEthEvent; + + let events = vec![MultiSignedEthEvent { + event: ext.data.ethereum_events[0].clone(), + signers: { + let mut s = BTreeSet::new(); + s.insert((ext.data.validator_addr.clone(), last_height)); + s + }, + }]; + let signatures = { + let mut s = HashMap::new(); + s.insert( + (ext.data.validator_addr.clone(), last_height), + ext.sig.clone(), + ); + s + }; + + let vote_extension_digest = + ethereum_events::VextDigest { events, signatures }; + assert_eq!( - shell.prepare_proposal(req).tx_records, - vec![record::remove(tx.to_bytes())] + vec![ext], + vote_extension_digest.clone().decompress(last_height) ); + + vote_extension_digest + } + + /// Test if Ethereum events validation and inclusion in a block + /// behaves as expected, considering honest validators. 
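`manually_assemble_digest` above hand-builds the digest that `compress_ethereum_events` is expected to produce for a single extension, and checks the round trip through `decompress`. Below is a toy, self-contained model of that compression; every type is an illustrative stand-in (the real map keys also carry block heights, and real events carry nonces and transfer data).

```rust
use std::collections::{BTreeMap, BTreeSet};

// Illustrative stand-ins for validator addresses, Ethereum
// events, and signatures.
type Addr = &'static str;
type Event = &'static str;
type Sig = u64;

/// Stand-in for a signed `ethereum_events::Vext`.
struct SignedExt {
    validator: Addr,
    events: Vec<Event>,
    sig: Sig,
}

/// Stand-in for `ethereum_events::VextDigest`: one map of
/// (event -> signers), plus one map of (signer -> signature).
struct Digest {
    events: BTreeMap<Event, BTreeSet<Addr>>,
    signatures: BTreeMap<Addr, Sig>,
}

fn compress(exts: Vec<SignedExt>) -> Digest {
    let mut events: BTreeMap<Event, BTreeSet<Addr>> = BTreeMap::new();
    let mut signatures = BTreeMap::new();
    for ext in exts {
        for ev in ext.events {
            // shared events are stored once, accumulating signers
            events.entry(ev).or_default().insert(ext.validator);
        }
        signatures.insert(ext.validator, ext.sig);
    }
    Digest { events, signatures }
}

fn main() {
    let digest = compress(vec![
        SignedExt { validator: "v1", events: vec!["transfer#1"], sig: 11 },
        SignedExt { validator: "v2", events: vec!["transfer#1"], sig: 22 },
    ]);
    // one shared event, two signers, two signatures
    assert_eq!(digest.events.len(), 1);
    assert_eq!(digest.events["transfer#1"].len(), 2);
    assert_eq!(digest.signatures.len(), 2);
}
```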
+ #[cfg(feature = "abcipp")] + #[test] + fn test_prepare_proposal_vext_normal_op() { + const LAST_HEIGHT: BlockHeight = BlockHeight(3); + + let (mut shell, _recv, _) = test_utils::setup(); + + // artificially change the block height + shell.storage.last_height = LAST_HEIGHT; + + let (protocol_key, _, _) = wallet::defaults::validator_keys(); + let validator_addr = wallet::defaults::validator_address(); + + let ethereum_event = EthereumEvent::TransfersToNamada { + nonce: 1u64.into(), + transfers: vec![], + }; + let ethereum_events = { + let ext = ethereum_events::Vext { + validator_addr, + block_height: LAST_HEIGHT, + ethereum_events: vec![ethereum_event], + } + .sign(&protocol_key); + assert!(ext.verify(&protocol_key.ref_to()).is_ok()); + ext + }; + let vote_extension = VoteExtension { + ethereum_events, + validator_set_update: None, + }; + let vote = ExtendedVoteInfo { + vote_extension: vote_extension.try_to_vec().unwrap(), + ..Default::default() + }; + + let mut rsp = shell.prepare_proposal(RequestPrepareProposal { + local_last_commit: Some(ExtendedCommitInfo { + votes: vec![vote], + ..Default::default() + }), + ..Default::default() + }); + let rsp_digest = { + assert_eq!(rsp.txs.len(), 1); + let tx_bytes = rsp.txs.remove(0); + let got = Tx::try_from(tx_bytes.as_slice()).expect("Test failed"); + let got_signed_tx = + SignedTxData::try_from_slice(&got.data.unwrap()[..]).unwrap(); + let protocol_tx = + TxType::try_from_slice(&got_signed_tx.data.unwrap()[..]) + .unwrap(); + + let protocol_tx = match protocol_tx { + TxType::Protocol(protocol_tx) => protocol_tx.tx, + _ => panic!("Test failed"), + }; + + match protocol_tx { + ProtocolTxType::EthereumEvents(digest) => digest, + _ => panic!("Test failed"), + } + }; + + let digest = manually_assemble_digest( + &protocol_key, + vote_extension.ethereum_events, + LAST_HEIGHT, + ); + + assert_eq!(rsp_digest, digest); + } + + /// Test if Ethereum events validation and inclusion in a block + /// behaves as expected, considering honest validators. + #[cfg(not(feature = "abcipp"))] + #[test] + fn test_prepare_proposal_vext_normal_op() { + const LAST_HEIGHT: BlockHeight = BlockHeight(3); + + let (mut shell, _recv, _) = test_utils::setup(); + + // artificially change the block height + shell.storage.last_height = LAST_HEIGHT; + + let (protocol_key, _, _) = wallet::defaults::validator_keys(); + let validator_addr = wallet::defaults::validator_address(); + + let ethereum_event = EthereumEvent::TransfersToNamada { + nonce: 1u64.into(), + transfers: vec![], + }; + let ext = { + let ext = ethereum_events::Vext { + validator_addr, + block_height: LAST_HEIGHT, + ethereum_events: vec![ethereum_event], + } + .sign(&protocol_key); + assert!(ext.verify(&protocol_key.ref_to()).is_ok()); + ext + }; + + let rsp_ext = { + let tx = ProtocolTxType::EthEventsVext(ext.clone()) + .sign(&protocol_key) + .to_bytes(); + let mut rsp = shell.prepare_proposal(RequestPrepareProposal { + txs: vec![tx], + ..Default::default() + }); + assert_eq!(rsp.txs.len(), 1); + + let tx_bytes = rsp.txs.remove(0); + extract_eth_events_vext(tx_bytes) + }; + + assert_eq!(rsp_ext, ext); + } + + /// Test if Ethereum events validation and inclusion in a block + /// behaves as expected, considering <= 2/3 voting power. 
+ #[test] + #[cfg_attr( + feature = "abcipp", + should_panic(expected = "A Tendermint quorum should never") + )] + fn test_prepare_proposal_vext_insufficient_voting_power() { + const FIRST_HEIGHT: BlockHeight = BlockHeight(0); + const LAST_HEIGHT: BlockHeight = BlockHeight(FIRST_HEIGHT.0 + 11); + + let (mut shell, _recv, _) = test_utils::setup(); + + // artificially change the voting power of the default validator to + // zero, change the block height, and commit a dummy block, + // to move to a new epoch + let events_epoch = + shell.storage.get_epoch(FIRST_HEIGHT).expect("Test failed"); + let validator_set = { + let params = shell.storage.read_pos_params(); + let mut epochs = shell.storage.read_validator_set(); + let mut data = + epochs.get(events_epoch).cloned().expect("Test failed"); + + data.active = data + .active + .iter() + .cloned() + .map(|v| WeightedValidator { + bonded_stake: 0, + ..v + }) + .collect(); + + epochs.set(data, events_epoch, ¶ms); + epochs + }; + shell.storage.write_validator_set(&validator_set); + + let mut req = FinalizeBlock::default(); + req.header.time = namada::types::time::DateTimeUtc::now(); + shell.storage.last_height = LAST_HEIGHT; + shell.finalize_block(req).expect("Test failed"); + shell.commit(); + + assert_eq!( + shell + .storage + .get_epoch(shell.storage.get_current_decision_height()), + Some(Epoch(1)) + ); + + // test prepare proposal + let (protocol_key, _, _) = wallet::defaults::validator_keys(); + let validator_addr = wallet::defaults::validator_address(); + + let ethereum_event = EthereumEvent::TransfersToNamada { + nonce: 1u64.into(), + transfers: vec![], + }; + let signed_eth_ev_vote_extension = { + let ext = ethereum_events::Vext { + validator_addr, + block_height: LAST_HEIGHT, + ethereum_events: vec![ethereum_event], + } + .sign(&protocol_key); + assert!(ext.verify(&protocol_key.ref_to()).is_ok()); + ext + }; + #[cfg(feature = "abcipp")] + { + let vote_extension = VoteExtension { + ethereum_events: signed_eth_ev_vote_extension, + validator_set_update: None, + }; + let vote = ExtendedVoteInfo { + vote_extension: vote_extension.try_to_vec().unwrap(), + ..Default::default() + }; + // this should panic + shell.prepare_proposal(RequestPrepareProposal { + local_last_commit: Some(ExtendedCommitInfo { + votes: vec![vote], + ..Default::default() + }), + ..Default::default() + }); + } #[cfg(not(feature = "abcipp"))] - assert!(shell.prepare_proposal(req).txs.is_empty()); + { + let vote = ProtocolTxType::EthEventsVext( + signed_eth_ev_vote_extension.clone(), + ) + .sign(&protocol_key) + .to_bytes(); + let mut rsp = shell.prepare_proposal(RequestPrepareProposal { + txs: vec![vote], + ..Default::default() + }); + assert_eq!(rsp.txs.len(), 1); + + let tx_bytes = rsp.txs.remove(0); + let got = Tx::try_from(&tx_bytes[..]).unwrap(); + let got_signed_tx = + SignedTxData::try_from_slice(&got.data.unwrap()[..]).unwrap(); + let protocol_tx = + TxType::try_from_slice(&got_signed_tx.data.unwrap()[..]) + .unwrap(); + let protocol_tx = match protocol_tx { + TxType::Protocol(protocol_tx) => protocol_tx.tx, + _ => panic!("Test failed"), + }; + + let rsp_ext = match protocol_tx { + ProtocolTxType::EthEventsVext(ext) => ext, + _ => panic!("Test failed"), + }; + + assert_eq!(signed_eth_ev_vote_extension, rsp_ext); + } } /// Test that if an error is encountered while /// trying to process a tx from the mempool, /// we simply exclude it from the proposal + // TODO: see note on `test_prepare_proposal_rejects_non_wrapper_tx` #[test] fn test_error_in_processing_tx() { - let 
(shell, _) = TestShell::new(); + let (mut shell, _recv, _) = test_utils::setup_at_height(3u64); let keypair = gen_keypair(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), @@ -203,17 +1120,15 @@ mod test_prepare_proposal { .to_bytes(); #[allow(clippy::redundant_clone)] let req = RequestPrepareProposal { + #[cfg(feature = "abcipp")] + local_last_commit: get_local_last_commit(&shell), txs: vec![wrapper.clone()], - max_tx_bytes: 0, ..Default::default() }; #[cfg(feature = "abcipp")] - assert_eq!( - shell.prepare_proposal(req).tx_records, - vec![record::remove(wrapper)] - ); + assert_eq!(shell.prepare_proposal(req).txs.len(), 1); #[cfg(not(feature = "abcipp"))] - assert!(shell.prepare_proposal(req).txs.is_empty()); + assert_eq!(shell.prepare_proposal(req).txs.len(), 0); } /// Test that the decrypted txs are included @@ -221,14 +1136,13 @@ mod test_prepare_proposal { /// corresponding wrappers #[test] fn test_decrypted_txs_in_correct_order() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _recv, _) = test_utils::setup(); let keypair = gen_keypair(); let mut expected_wrapper = vec![]; let mut expected_decrypted = vec![]; let mut req = RequestPrepareProposal { txs: vec![], - max_tx_bytes: 0, ..Default::default() }; // create a request with two new wrappers from mempool and @@ -256,58 +1170,26 @@ mod test_prepare_proposal { expected_wrapper.push(wrapper.clone()); req.txs.push(wrapper.to_bytes()); } - // we extract the inner data from the txs for testing - // equality since otherwise changes in timestamps would - // fail the test - expected_wrapper.append(&mut expected_decrypted); - let expected_txs: Vec> = expected_wrapper - .iter() - .map(|tx| tx.data.clone().expect("Test failed")) + let expected_txs: Vec = expected_decrypted + .into_iter() + .chain(expected_wrapper.into_iter()) + // we extract the inner data from the txs for testing + // equality since otherwise changes in timestamps would + // fail the test + .map(|tx| tx.data.expect("Test failed")) .collect(); - #[cfg(feature = "abcipp")] - { - let received: Vec> = shell - .prepare_proposal(req) - .tx_records - .iter() - .filter_map( - |TxRecord { - tx: tx_bytes, - action, - }| { - if *action == (TxAction::Unmodified as i32) - || *action == (TxAction::Added as i32) - { - Some( - Tx::try_from(tx_bytes.as_slice()) - .expect("Test failed") - .data - .expect("Test failed"), - ) - } else { - None - } - }, - ) - .collect(); - // check that the order of the txs is correct - assert_eq!(received, expected_txs); - } - #[cfg(not(feature = "abcipp"))] - { - let received: Vec> = shell - .prepare_proposal(req) - .txs - .into_iter() - .map(|tx_bytes| { - Tx::try_from(tx_bytes.as_slice()) - .expect("Test failed") - .data - .expect("Test failed") - }) - .collect(); - // check that the order of the txs is correct - assert_eq!(received, expected_txs); - } + let received: Vec = shell + .prepare_proposal(req) + .txs + .into_iter() + .map(|tx_bytes| { + Tx::try_from(tx_bytes.as_slice()) + .expect("Test failed") + .data + .expect("Test failed") + }) + .collect(); + // check that the order of the txs is correct + assert_eq!(received, expected_txs); } } diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 67ca13101e..1a0004506e 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -1,10 +1,72 @@ //! Implementation of the ['VerifyHeader`], [`ProcessProposal`], //! 
and [`RevertProposal`] ABCI++ methods for the Shell +use data_encoding::HEXUPPER; +use namada::core::hints; +use namada::core::ledger::storage::Storage; +use namada::ledger::pos::{PosQueries, SendValsetUpd}; +use namada::types::transaction::protocol::ProtocolTxType; +#[cfg(feature = "abcipp")] +use namada::types::voting_power::FractionalVotingPower; + use super::*; use crate::facade::tendermint_proto::abci::response_process_proposal::ProposalStatus; use crate::facade::tendermint_proto::abci::RequestProcessProposal; +use crate::node::ledger::shell::block_space_alloc::{ + threshold, AllocFailure, TxBin, +}; use crate::node::ledger::shims::abcipp_shim_types::shim::response::ProcessProposal; +use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes; + +/// Validation metadata, to keep track of used resources or +/// transaction numbers, in a block proposal. +#[derive(Default)] +pub struct ValidationMeta { + /// Vote extension digest counters. + #[cfg(feature = "abcipp")] + pub digests: DigestCounters, + /// Space utilized by encrypted txs. + pub encrypted_txs_bin: TxBin, + /// Space utilized by all txs. + pub txs_bin: TxBin, + /// Check if the decrypted tx queue has any elements + /// left. + /// + /// This field will only evaluate to true if a block + /// proposer didn't include all decrypted txs in a block. + pub decrypted_queue_has_remaining_txs: bool, +} + +impl From<&Storage> for ValidationMeta +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + fn from(storage: &Storage) -> Self { + let max_proposal_bytes = storage.get_max_proposal_bytes().get(); + let encrypted_txs_bin = + TxBin::init_over_ratio(max_proposal_bytes, threshold::ONE_THIRD); + let txs_bin = TxBin::init(max_proposal_bytes); + Self { + #[cfg(feature = "abcipp")] + digests: DigestCounters::default(), + decrypted_queue_has_remaining_txs: false, + encrypted_txs_bin, + txs_bin, + } + } +} + +/// Contains stateful data about the number of vote extension +/// digests found as protocol transactions in a proposed block. +#[derive(Default)] +#[cfg(feature = "abcipp")] +pub struct DigestCounters { + /// The number of Ethereum events vote extensions found thus far. + pub eth_ev_digest_num: usize, + /// The number of validator set update vote extensions found thus far. + pub valset_upd_digest_num: usize, +} impl Shell where @@ -23,30 +85,254 @@ where /// but we only reject the entire block if the order of the /// included txs violates the order decided upon in the previous /// block. + #[cfg(feature = "abcipp")] pub fn process_proposal( &self, req: RequestProcessProposal, ) -> ProcessProposal { - let tx_results = self.process_txs(&req.txs); + tracing::info!( + proposer = ?HEXUPPER.encode(&req.proposer_address), + height = req.height, + hash = ?HEXUPPER.encode(&req.hash), + n_txs = req.txs.len(), + "Received block proposal", + ); + let (tx_results, metadata) = self.check_proposal(&req.txs); + + // We should not have more than one `ethereum_events::VextDigest` in + // a proposal from some round's leader. 
+ let invalid_num_of_eth_ev_digests = + !self.has_proper_eth_events_num(&metadata); + if invalid_num_of_eth_ev_digests { + tracing::warn!( + proposer = ?HEXUPPER.encode(&req.proposer_address), + height = req.height, + hash = ?HEXUPPER.encode(&req.hash), + eth_ev_digest_num = metadata.digests.eth_ev_digest_num, + "Found invalid number of Ethereum events vote extension digests, proposed block \ + will be rejected" + ); + } + + let invalid_num_of_valset_upd_digests = + !self.has_proper_valset_upd_num(&metadata); + if invalid_num_of_valset_upd_digests { + tracing::warn!( + proposer = ?HEXUPPER.encode(&req.proposer_address), + height = req.height, + hash = ?HEXUPPER.encode(&req.hash), + valset_upd_digest_num = metadata.digests.valset_upd_digest_num, + "Found invalid number of validator set update vote extension digests, proposed block \ + will be rejected" + ); + } + + // Erroneous transactions were detected when processing + // the leader's proposal. We allow txs that do not + // deserialize properly, that have invalid signatures + // and that have invalid wasm code to reach FinalizeBlock. + let invalid_txs = tx_results.iter().any(|res| { + let error = ErrorCodes::from_u32(res.code).expect( + "All error codes returned from process_single_tx are valid", + ); + !error.is_recoverable() + }); + if invalid_txs { + tracing::warn!( + proposer = ?HEXUPPER.encode(&req.proposer_address), + height = req.height, + hash = ?HEXUPPER.encode(&req.hash), + "Found invalid transactions, proposed block will be rejected" + ); + } + + let has_remaining_decrypted_txs = + metadata.decrypted_queue_has_remaining_txs; + if has_remaining_decrypted_txs { + tracing::warn!( + proposer = ?HEXUPPER.encode(&req.proposer_address), + height = req.height, + hash = ?HEXUPPER.encode(&req.hash), + "Not all decrypted txs from the previous height were included in + the proposal, the block will be rejected" + ); + } + + let will_reject_proposal = invalid_num_of_eth_ev_digests + || invalid_num_of_valset_upd_digests + || invalid_txs + || has_remaining_decrypted_txs; + + let status = if will_reject_proposal { + ProposalStatus::Reject + } else { + ProposalStatus::Accept + }; ProcessProposal { - status: if tx_results.iter().any(|res| res.code > 3) { - ProposalStatus::Reject as i32 - } else { - ProposalStatus::Accept as i32 - }, + status: status as i32, tx_results, } } - /// Check all the given txs. - pub fn process_txs(&self, txs: &[Vec]) -> Vec { + /// Check all the txs in a block. Some txs may be incorrect, + /// but we only reject the entire block if the order of the + /// included txs violates the order decided upon in the previous + /// block. + #[cfg(not(feature = "abcipp"))] + pub fn process_proposal( + &self, + req: RequestProcessProposal, + ) -> ProcessProposal { + tracing::info!( + proposer = ?HEXUPPER.encode(&req.proposer_address), + height = req.height, + hash = ?HEXUPPER.encode(&req.hash), + n_txs = req.txs.len(), + "Received block proposal", + ); + let (tx_results, meta) = self.check_proposal(&req.txs); + + // Erroneous transactions were detected when processing + // the leader's proposal. We allow txs that do not + // deserialize properly, that have invalid signatures + // and that have invalid wasm code to reach FinalizeBlock. 
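The accept/reject decision here hinges on `ErrorCodes::is_recoverable`. A hypothetical reduction of that partition follows; the variant list and the recoverable/unrecoverable split are assumptions based on the comments and error-code docs in this diff, not the authoritative definition.

```rust
/// A stand-in for the shell's `ErrorCodes`; the variants and the
/// split below are assumptions inferred from the diff's comments.
#[allow(dead_code)] // sketch: not every variant is constructed
#[derive(Clone, Copy)]
enum ErrorCodes {
    Ok,
    InvalidTx,
    InvalidSig,
    WasmRuntimeError,
    ExtraTxs,
    InvalidVoteExtension,
}

impl ErrorCodes {
    /// Recoverable codes may appear in an *accepted* proposal and
    /// are dealt with in FinalizeBlock; any unrecoverable code
    /// causes the whole proposal to be rejected.
    fn is_recoverable(self) -> bool {
        use ErrorCodes::*;
        matches!(self, Ok | InvalidTx | InvalidSig | WasmRuntimeError)
    }
}

fn main() {
    // same shape as the `invalid_txs` scan in the diff
    let tx_codes = [ErrorCodes::Ok, ErrorCodes::InvalidSig];
    assert!(!tx_codes.iter().any(|c| !c.is_recoverable()));

    let tx_codes = [ErrorCodes::Ok, ErrorCodes::ExtraTxs];
    assert!(tx_codes.iter().any(|c| !c.is_recoverable()));
}
```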
+ let invalid_txs = tx_results.iter().any(|res| { + let error = ErrorCodes::from_u32(res.code).expect( + "All error codes returned from process_single_tx are valid", + ); + !error.is_recoverable() + }); + if invalid_txs { + tracing::warn!( + proposer = ?HEXUPPER.encode(&req.proposer_address), + height = req.height, + hash = ?HEXUPPER.encode(&req.hash), + "Found invalid transactions, proposed block will be rejected" + ); + } + + let has_remaining_decrypted_txs = + meta.decrypted_queue_has_remaining_txs; + if has_remaining_decrypted_txs { + tracing::warn!( + proposer = ?HEXUPPER.encode(&req.proposer_address), + height = req.height, + hash = ?HEXUPPER.encode(&req.hash), + "Not all decrypted txs from the previous height were included in + the proposal, the block will be rejected" + ); + } + + let will_reject_proposal = invalid_txs || has_remaining_decrypted_txs; + + // TODO: check if tx queue still has txs left in it + + let status = if will_reject_proposal { + ProposalStatus::Reject + } else { + ProposalStatus::Accept + }; + + ProcessProposal { + status: status as i32, + tx_results, + } + } + + /// Evaluates the corresponding [`TxResult`] for each tx in the + /// proposal. Additionally, counts the number of digest + /// txs and the bytes used by encrypted txs in the proposal. + /// + /// `ProcessProposal` should be able to make a decision on whether a + /// proposed block is acceptable or not based solely on what this + /// function returns. + pub fn check_proposal( + &self, + txs: &[TxBytes], + ) -> (Vec, ValidationMeta) { let mut tx_queue_iter = self.storage.tx_queue.iter(); - txs.iter() + let mut metadata = ValidationMeta::from(&self.storage); + let tx_results: Vec<_> = txs + .iter() .map(|tx_bytes| { - self.process_single_tx(tx_bytes, &mut tx_queue_iter) + self.check_proposal_tx( + tx_bytes, + &mut tx_queue_iter, + &mut metadata, + ) }) - .collect() + .collect(); + metadata.decrypted_queue_has_remaining_txs = + !self.storage.tx_queue.is_empty() && tx_queue_iter.next().is_some(); + (tx_results, metadata) + } + + /// Validates a list of vote extensions, included in PrepareProposal. + /// + /// If a vote extension is [`Some`], then it was validated properly, + /// and the voting power of the validator who signed it is considered + /// in the sum of the total voting power of all received vote extensions. 
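The quorum accounting in `validate_vexts_in_proposal`, defined next, reduces to integer arithmetic: under `abcipp`, every fraction `power / total` shares the same denominator, so "sum of fractions > 2/3" is a single cross-multiplication. A sketch under that assumption (the non-`abcipp` path in the diff skips the stake accounting entirely; all names here are hypothetical):

```rust
/// Minimal sketch of the quorum check: `None` entries model vote
/// extensions that failed validation, mirroring the `all(..)`
/// early-exit in the diff.
fn backing_stake_suffices(
    validated_powers: impl Iterator<Item = Option<u64>>,
    total_power: u64,
) -> Result<bool, &'static str> {
    let mut sum: u128 = 0;
    for maybe_power in validated_powers {
        // a single invalid extension rejects the whole batch
        let power = maybe_power.ok_or("invalid vote extension")?;
        sum += u128::from(power);
    }
    // sum / total > 2/3  <=>  3 * sum > 2 * total
    Ok(3 * sum > 2 * u128::from(total_power))
}

fn main() {
    // 67 of 100 stake units signed: strictly more than 2/3
    assert_eq!(
        backing_stake_suffices([Some(40), Some(27)].into_iter(), 100),
        Ok(true)
    );
    // exactly 2/3 (66 of 99) is NOT enough: the comparison is strict
    assert_eq!(
        backing_stake_suffices([Some(33), Some(33)].into_iter(), 99),
        Ok(false)
    );
    // one invalid extension rejects the whole batch
    assert!(backing_stake_suffices([Some(50), None].into_iter(), 100).is_err());
}
```

The strict comparison matches the diff's `voting_power > FractionalVotingPower::TWO_THIRDS` and the "less than or equal to 2/3" wording of `not_enough_voting_power_msg`.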
+ fn validate_vexts_in_proposal(&self, mut vote_extensions: I) -> TxResult + where + I: Iterator>, + { + #[cfg(feature = "abcipp")] + let mut voting_power = FractionalVotingPower::default(); + #[cfg(feature = "abcipp")] + let total_power = { + let epoch = self.storage.get_epoch(self.storage.last_height); + u64::from(self.storage.get_total_voting_power(epoch)) + }; + + if vote_extensions.all(|maybe_ext| { + maybe_ext + .map(|_power| { + #[cfg(feature = "abcipp")] + { + voting_power += FractionalVotingPower::new( + u64::from(_power), + total_power, + ) + .expect( + "The voting power we obtain from storage should \ + always be valid", + ); + } + }) + .is_some() + }) { + #[cfg(feature = "abcipp")] + if voting_power > FractionalVotingPower::TWO_THIRDS { + TxResult { + code: ErrorCodes::Ok.into(), + info: "Process proposal accepted this transaction".into(), + } + } else { + TxResult { + code: ErrorCodes::InvalidVoteExtension.into(), + info: "Process proposal rejected this proposal because \ + the backing stake of the vote extensions published \ + in the proposal was insufficient" + .into(), + } + } + + #[cfg(not(feature = "abcipp"))] + { + TxResult { + code: ErrorCodes::Ok.into(), + info: "Process proposal accepted this transaction".into(), + } + } + } else { + TxResult { + code: ErrorCodes::InvalidVoteExtension.into(), + info: "Process proposal rejected this proposal because at \ + least one of the vote extensions included was invalid." + .into(), + } + } } /// Checks if the Tx can be deserialized from bytes. Checks the fees and @@ -63,123 +349,254 @@ where /// 3: Wasm runtime error /// 4: Invalid order of decrypted txs /// 5. More decrypted txs than expected + /// 6. A transaction could not be decrypted + /// 7. An error in the vote extensions included in the proposal + /// 8. Not enough block space was available for some tx /// /// INVARIANT: Any changes applied in this method must be reverted if the /// proposal is rejected (unless we can simply overwrite them in the /// next block). - pub(crate) fn process_single_tx<'a>( + pub(crate) fn check_proposal_tx<'a>( &self, tx_bytes: &[u8], tx_queue_iter: &mut impl Iterator, + metadata: &mut ValidationMeta, ) -> TxResult { - let tx = match Tx::try_from(tx_bytes) { - Ok(tx) => tx, - Err(_) => { - return TxResult { + // try to allocate space for this tx + if let Err(e) = metadata.txs_bin.try_dump(tx_bytes) { + return TxResult { + code: ErrorCodes::AllocationError.into(), + info: match e { + AllocFailure::Rejected { .. } => { + "No more space left in the block" + } + AllocFailure::OverflowsBin { .. 
} => { + "The given tx is larger than the max configured \ + proposal size" + } + } + .into(), + }; + } + + let maybe_tx = Tx::try_from(tx_bytes).map_or_else( + |err| { + tracing::debug!( + ?err, + "Couldn't deserialize transaction received during \ + PrepareProposal" + ); + Err(TxResult { code: ErrorCodes::InvalidTx.into(), info: "The submitted transaction was not deserializable" .into(), - }; - } + }) + }, + |tx| { + process_tx(tx).map_err(|err| { + // This occurs if the wrapper / protocol tx signature is + // invalid + TxResult { + code: ErrorCodes::InvalidSig.into(), + info: err.to_string(), + } + }) + }, + ); + let tx = match maybe_tx { + Ok(tx) => tx, + Err(tx_result) => return tx_result, }; + // TODO: This should not be hardcoded let privkey = ::G2Affine::prime_subgroup_generator(); - match process_tx(tx) { - // This occurs if the wrapper / protocol tx signature is invalid - Err(err) => TxResult { - code: ErrorCodes::InvalidSig.into(), - info: err.to_string(), + match tx { + // If it is a raw transaction, we do no further validation + TxType::Raw(_) => TxResult { + code: ErrorCodes::InvalidTx.into(), + info: "Transaction rejected: Non-encrypted transactions are \ + not supported" + .into(), }, - Ok(result) => match result { - // If it is a raw transaction, we do no further validation - TxType::Raw(_) => TxResult { + TxType::Protocol(protocol_tx) => match protocol_tx.tx { + ProtocolTxType::EthEventsVext(ext) => self + .validate_eth_events_vext_and_get_it_back( + ext, + self.storage.last_height, + ) + .map(|_| TxResult { + code: ErrorCodes::Ok.into(), + info: "Process Proposal accepted this transaction" + .into(), + }) + .unwrap_or_else(|err| TxResult { + code: ErrorCodes::InvalidVoteExtension.into(), + info: format!( + "Process proposal rejected this proposal because \ + one of the included Ethereum events vote \ + extensions was invalid: {err}" + ), + }), + ProtocolTxType::ValSetUpdateVext(ext) => self + .validate_valset_upd_vext_and_get_it_back( + ext, + self.storage.last_height, + ) + .map(|_| TxResult { + code: ErrorCodes::Ok.into(), + info: "Process Proposal accepted this transaction" + .into(), + }) + .unwrap_or_else(|err| TxResult { + code: ErrorCodes::InvalidVoteExtension.into(), + info: format!( + "Process proposal rejected this proposal because \ + one of the included validator set update vote \ + extensions was invalid: {err}" + ), + }), + ProtocolTxType::EthereumEvents(digest) => { + #[cfg(feature = "abcipp")] + { + metadata.digests.eth_ev_digest_num += 1; + } + let extensions = + digest.decompress(self.storage.last_height); + let valid_extensions = + self.validate_eth_events_vext_list(extensions).map( + |maybe_ext| maybe_ext.ok().map(|(power, _)| power), + ); + + self.validate_vexts_in_proposal(valid_extensions) + } + ProtocolTxType::ValidatorSetUpdate(digest) => { + if !self.storage.can_send_validator_set_update( + SendValsetUpd::AtPrevHeight, + ) { + return TxResult { + code: ErrorCodes::InvalidVoteExtension.into(), + info: "Process proposal rejected a validator set \ + update vote extension issued at an invalid \ + block height" + .into(), + }; + } + #[cfg(feature = "abcipp")] + { + metadata.digests.valset_upd_digest_num += 1; + } + + let extensions = + digest.decompress(self.storage.last_height); + let valid_extensions = + self.validate_valset_upd_vext_list(extensions).map( + |maybe_ext| maybe_ext.ok().map(|(power, _)| power), + ); + + self.validate_vexts_in_proposal(valid_extensions) + } + _ => TxResult { code: ErrorCodes::InvalidTx.into(), - info: "Transaction 
rejected: Non-encrypted transactions \ - are not supported" - .into(), + info: "Unsupported protocol transaction type".into(), }, - TxType::Protocol(_) => TxResult { - code: ErrorCodes::InvalidTx.into(), - info: "Protocol transactions are a fun new feature that \ - is coming soon to a blockchain near you. Patience." - .into(), + }, + TxType::Decrypted(tx) => match tx_queue_iter.next() { + Some(wrapper) => { + if wrapper.tx_hash != tx.hash_commitment() { + TxResult { + code: ErrorCodes::InvalidOrder.into(), + info: "Process proposal rejected a decrypted \ + transaction that violated the tx order \ + determined in the previous block" + .into(), + } + } else if verify_decrypted_correctly(&tx, privkey) { + TxResult { + code: ErrorCodes::Ok.into(), + info: "Process Proposal accepted this transaction" + .into(), + } + } else { + TxResult { + code: ErrorCodes::InvalidTx.into(), + info: "The encrypted payload of tx was \ + incorrectly marked as un-decryptable" + .into(), + } + } + } + None => TxResult { + code: ErrorCodes::ExtraTxs.into(), + info: "Received more decrypted txs than expected".into(), }, - TxType::Decrypted(tx) => match tx_queue_iter.next() { - Some(wrapper) => { - if wrapper.tx_hash != tx.hash_commitment() { - TxResult { - code: ErrorCodes::InvalidOrder.into(), - info: "Process proposal rejected a decrypted \ - transaction that violated the tx order \ - determined in the previous block" - .into(), - } - } else if verify_decrypted_correctly(&tx, privkey) { - TxResult { - code: ErrorCodes::Ok.into(), - info: "Process Proposal accepted this \ - transaction" - .into(), + }, + TxType::Wrapper(tx) => { + // try to allocate space for this encrypted tx + if let Err(e) = metadata.encrypted_txs_bin.try_dump(tx_bytes) { + return TxResult { + code: ErrorCodes::AllocationError.into(), + info: match e { + AllocFailure::Rejected { .. } => { + "No more space left in the block for wrapper \ + txs" } - } else { - TxResult { - code: ErrorCodes::InvalidTx.into(), - info: "The encrypted payload of tx was \ - incorrectly marked as un-decryptable" - .into(), + AllocFailure::OverflowsBin { .. } => { + "The given wrapper tx is larger than 1/3 of \ + the available block space" } } - } - None => TxResult { - code: ErrorCodes::ExtraTxs.into(), - info: "Received more decrypted txs than expected" + .into(), + }; + } + if hints::unlikely(self.encrypted_txs_not_allowed()) { + return TxResult { + code: ErrorCodes::AllocationError.into(), + info: "Wrapper txs not allowed at the current block \ + height" .into(), - }, - }, - TxType::Wrapper(tx) => { - // validate the ciphertext via Ferveo - if !tx.validate_ciphertext() { + }; + } + + // validate the ciphertext via Ferveo + if !tx.validate_ciphertext() { + TxResult { + code: ErrorCodes::InvalidTx.into(), + info: format!( + "The ciphertext of the wrapped tx {} is invalid", + hash_tx(tx_bytes) + ), + } + } else { + // If the public key corresponds to the MASP sentinel + // transaction key, then the fee payer is effectively + // the MASP, otherwise derive + // the payer from public key. 
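The comment above describes the fee-payer selection implemented just below: wrappers signed with the MASP sentinel transaction key are paid for by the MASP itself, all others by the address derived from the signer's public key, after which the fee must not exceed the payer's balance. A self-contained illustration, where the key type and sentinel constant are stand-ins:

```rust
/// Illustrative stand-in for a public key.
#[derive(Debug, PartialEq, Clone, Copy)]
struct PubKey(u8);

#[derive(Debug, PartialEq)]
enum Payer {
    Masp,
    Derived(PubKey),
}

/// Stand-in for `masp_tx_key().ref_to()`.
const MASP_SENTINEL_KEY: PubKey = PubKey(0);

fn fee_payer(wrapper_pk: PubKey) -> Payer {
    if wrapper_pk != MASP_SENTINEL_KEY {
        Payer::Derived(wrapper_pk)
    } else {
        Payer::Masp
    }
}

fn fee_is_payable(fee_amount: u64, payer_balance: u64) -> bool {
    // same comparison as `tx.fee.amount <= balance` in the diff
    fee_amount <= payer_balance
}

fn main() {
    assert_eq!(fee_payer(PubKey(7)), Payer::Derived(PubKey(7)));
    assert_eq!(fee_payer(MASP_SENTINEL_KEY), Payer::Masp);
    assert!(fee_is_payable(10, 10)); // paying the exact balance is allowed
    assert!(!fee_is_payable(11, 10));
}
```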
+ let fee_payer = if tx.pk != masp_tx_key().ref_to() { + tx.fee_payer() + } else { + masp() + }; + // check that the fee payer has sufficient balance + let balance = + self.storage.get_balance(&tx.fee.token, &fee_payer); + + if tx.fee.amount <= balance { TxResult { - code: ErrorCodes::InvalidTx.into(), - info: format!( - "The ciphertext of the wrapped tx {} is \ - invalid", - hash_tx(tx_bytes) - ), + code: ErrorCodes::Ok.into(), + info: "Process proposal accepted this transaction" + .into(), } } else { - // If the public key corresponds to the MASP sentinel - // transaction key, then the fee payer is effectively - // the MASP, otherwise derive - // they payer from public key. - let fee_payer = if tx.pk != masp_tx_key().ref_to() { - tx.fee_payer() - } else { - masp() - }; - // check that the fee payer has sufficient balance - let balance = - self.get_balance(&tx.fee.token, &fee_payer); - - if tx.fee.amount <= balance { - TxResult { - code: ErrorCodes::Ok.into(), - info: "Process proposal accepted this \ - transaction" - .into(), - } - } else { - TxResult { - code: ErrorCodes::InvalidTx.into(), - info: "The address given does not have \ - sufficient balance to pay fee" - .into(), - } + TxResult { + code: ErrorCodes::InvalidTx.into(), + info: "The address given does not have sufficient \ + balance to pay fee" + .into(), } } } - }, + } } } @@ -189,33 +606,370 @@ where ) -> shim::response::RevertProposal { Default::default() } + + /// Checks if we have found the correct number of Ethereum events + /// vote extensions in [`DigestCounters`]. + #[cfg(feature = "abcipp")] + fn has_proper_eth_events_num(&self, meta: &ValidationMeta) -> bool { + self.storage.last_height.0 == 0 || meta.digests.eth_ev_digest_num == 1 + } + + /// Checks if we have found the correct number of validator set update + /// vote extensions in [`DigestCounters`]. + #[cfg(feature = "abcipp")] + fn has_proper_valset_upd_num(&self, meta: &ValidationMeta) -> bool { + if self + .storage + .can_send_validator_set_update(SendValsetUpd::AtPrevHeight) + { + self.storage.last_height.0 == 0 + || meta.digests.valset_upd_digest_num == 1 + } else { + true + } + } + + /// Checks if it is not possible to include encrypted txs at the current + /// block height. + fn encrypted_txs_not_allowed(&self) -> bool { + let is_2nd_height_off = self.storage.is_deciding_offset_within_epoch(1); + let is_3rd_height_off = self.storage.is_deciding_offset_within_epoch(2); + is_2nd_height_off || is_3rd_height_off + } } /// We test the failure cases of [`process_proposal`]. The happy flows /// are covered by the e2e tests. 
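Before the failure-case tests, note how `ValidationMeta` mirrors the proposal side: every tx is dumped into a bin capped at the max proposal size, and wrapper txs additionally into a bin capped at one third of it (`threshold::ONE_THIRD`), so a proposal that over-allocates is rejected deterministically by every validator. A toy mirror of that double accounting, with illustrative capacities and names:

```rust
/// Simplified size-capped bin, standing in for `TxBin`.
struct Bin {
    capacity: u64,
    occupied: u64,
}

impl Bin {
    fn new(capacity: u64) -> Self {
        Self { capacity, occupied: 0 }
    }

    /// Like `TxBin::try_dump`: account for the tx or fail.
    fn try_dump(&mut self, tx_len: u64) -> Result<(), ()> {
        if self.occupied + tx_len > self.capacity {
            return Err(());
        }
        self.occupied += tx_len;
        Ok(())
    }
}

fn main() {
    let max_proposal_bytes = 90;
    let mut txs_bin = Bin::new(max_proposal_bytes);
    // encrypted txs may use at most one third of the proposal space
    let mut encrypted_txs_bin = Bin::new(max_proposal_bytes / 3);

    // a 40-byte wrapper tx fits in the block as a whole, but not in
    // the encrypted txs bin -- in the diff this surfaces as an
    // `AllocationError` result, rejecting the proposal
    assert!(txs_bin.try_dump(40).is_ok());
    assert!(encrypted_txs_bin.try_dump(40).is_err());
}
```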
#[cfg(test)] mod test_process_proposal { + #[cfg(feature = "abcipp")] + use std::collections::HashMap; + + #[cfg(feature = "abcipp")] + use assert_matches::assert_matches; use borsh::BorshDeserialize; use namada::proto::SignedTxData; + use namada::types::ethereum_events::EthereumEvent; use namada::types::hash::Hash; use namada::types::key::*; use namada::types::storage::Epoch; - use namada::types::token::Amount; + use namada::types::token; use namada::types::transaction::encrypted::EncryptedTx; use namada::types::transaction::{EncryptionKey, Fee}; + use namada::types::vote_extensions::ethereum_events; + #[cfg(feature = "abcipp")] + use namada::types::vote_extensions::ethereum_events::MultiSignedEthEvent; use super::*; - use crate::facade::tendermint_proto::abci::RequestInitChain; - use crate::facade::tendermint_proto::google::protobuf::Timestamp; use crate::node::ledger::shell::test_utils::{ - gen_keypair, ProcessProposal, TestError, TestShell, + self, gen_keypair, ProcessProposal, TestError, TestShell, }; + #[cfg(feature = "abcipp")] + use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes; + use crate::wallet; + + #[cfg(feature = "abcipp")] + fn get_empty_eth_ev_digest(shell: &TestShell) -> TxBytes { + let protocol_key = shell.mode.get_protocol_key().expect("Test failed"); + let addr = shell + .mode + .get_validator_address() + .expect("Test failed") + .clone(); + let ext = ethereum_events::Vext::empty( + shell.storage.last_height, + addr.clone(), + ) + .sign(protocol_key); + ProtocolTxType::EthereumEvents(ethereum_events::VextDigest { + signatures: { + let mut s = HashMap::new(); + s.insert((addr, shell.storage.last_height), ext.sig); + s + }, + events: vec![], + }) + .sign(protocol_key) + .to_bytes() + } + + /// Test that if a proposal contains more than one + /// `ethereum_events::VextDigest`, we reject it. 
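The rule exercised by the test that follows is `has_proper_eth_events_num`: under `abcipp`, every non-genesis proposal must carry exactly one Ethereum events digest. Reduced to plain parameters (an illustrative simplification of the shell state):

```rust
/// Sketch of `has_proper_eth_events_num`: except at genesis
/// (`last_height == 0`), exactly one Ethereum events digest is
/// expected per proposal.
fn has_proper_eth_events_num(last_height: u64, digest_count: usize) -> bool {
    last_height == 0 || digest_count == 1
}

fn main() {
    assert!(has_proper_eth_events_num(0, 0)); // genesis: no digest expected
    assert!(has_proper_eth_events_num(2, 1));
    assert!(!has_proper_eth_events_num(2, 0)); // missing digest: rejected
    assert!(!has_proper_eth_events_num(2, 2)); // duplicated digest: rejected
}
```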
+ #[test] + #[cfg(feature = "abcipp")] + fn test_more_than_one_vext_digest_rejected() { + const LAST_HEIGHT: BlockHeight = BlockHeight(2); + let (mut shell, _recv, _) = test_utils::setup(); + shell.storage.last_height = LAST_HEIGHT; + let (protocol_key, _, _) = wallet::defaults::validator_keys(); + let vote_extension_digest = { + let validator_addr = wallet::defaults::validator_address(); + let signed_vote_extension = { + let ext = ethereum_events::Vext::empty( + LAST_HEIGHT, + validator_addr.clone(), + ) + .sign(&protocol_key); + assert!(ext.verify(&protocol_key.ref_to()).is_ok()); + ext + }; + // Ethereum events digest with no observed events + ethereum_events::VextDigest { + signatures: { + let mut s = HashMap::new(); + s.insert( + (validator_addr, shell.storage.last_height), + signed_vote_extension.sig, + ); + s + }, + events: vec![], + } + }; + let tx = ProtocolTxType::EthereumEvents(vote_extension_digest) + .sign(&protocol_key) + .to_bytes(); + let request = ProcessProposal { + txs: vec![tx.clone(), tx], + }; + let results = shell.process_proposal(request); + assert_matches!( + results, Err(TestError::RejectProposal(s)) if s.len() == 2 + ); + } + + #[cfg(feature = "abcipp")] + fn check_rejected_eth_events_digest( + shell: &mut TestShell, + vote_extension_digest: ethereum_events::VextDigest, + protocol_key: common::SecretKey, + ) { + let tx = ProtocolTxType::EthereumEvents(vote_extension_digest) + .sign(&protocol_key) + .to_bytes(); + let request = ProcessProposal { txs: vec![tx] }; + let response = if let Err(TestError::RejectProposal(resp)) = + shell.process_proposal(request) + { + if let [resp] = resp.as_slice() { + resp.clone() + } else { + panic!("Test failed") + } + } else { + panic!("Test failed") + }; + assert_eq!( + response.result.code, + u32::from(ErrorCodes::InvalidVoteExtension) + ); + } + + #[cfg(not(feature = "abcipp"))] + fn check_rejected_eth_events( + shell: &mut TestShell, + vote_extension: ethereum_events::SignedVext, + protocol_key: common::SecretKey, + ) { + let tx = ProtocolTxType::EthEventsVext(vote_extension) + .sign(&protocol_key) + .to_bytes(); + let request = ProcessProposal { txs: vec![tx] }; + let response = if let Err(TestError::RejectProposal(resp)) = + shell.process_proposal(request) + { + if let [resp] = resp.as_slice() { + resp.clone() + } else { + panic!("Test failed") + } + } else { + panic!("Test failed") + }; + assert_eq!( + response.result.code, + u32::from(ErrorCodes::InvalidVoteExtension) + ); + } + + /// Test that if a proposal contains Ethereum events with + /// invalid validator signatures, we reject it. 
+ #[test] + fn test_drop_vext_with_invalid_sigs() { + const LAST_HEIGHT: BlockHeight = BlockHeight(2); + let (mut shell, _recv, _) = test_utils::setup(); + shell.storage.last_height = LAST_HEIGHT; + let (protocol_key, _, _) = wallet::defaults::validator_keys(); + let addr = wallet::defaults::validator_address(); + let event = EthereumEvent::TransfersToNamada { + nonce: 1u64.into(), + transfers: vec![], + }; + let ext = { + // generate a valid signature + #[allow(clippy::redundant_clone)] + let mut ext = ethereum_events::Vext { + validator_addr: addr.clone(), + block_height: LAST_HEIGHT, + ethereum_events: vec![event.clone()], + } + .sign(&protocol_key); + assert!(ext.verify(&protocol_key.ref_to()).is_ok()); + + // modify this signature such that it becomes invalid + ext.sig = test_utils::invalidate_signature(ext.sig); + ext + }; + #[cfg(feature = "abcipp")] + { + let vote_extension_digest = ethereum_events::VextDigest { + signatures: { + let mut s = HashMap::new(); + s.insert( + (addr.clone(), shell.storage.last_height), + ext.sig, + ); + s + }, + events: vec![MultiSignedEthEvent { + event, + signers: { + let mut s = BTreeSet::new(); + s.insert((addr, shell.storage.last_height)); + s + }, + }], + }; + check_rejected_eth_events_digest( + &mut shell, + vote_extension_digest, + protocol_key, + ); + } + #[cfg(not(feature = "abcipp"))] + { + check_rejected_eth_events(&mut shell, ext, protocol_key); + } + } + + /// Test that if a proposal contains Ethereum events with + /// invalid block heights, we reject it. + #[test] + fn test_drop_vext_with_invalid_bheights() { + const LAST_HEIGHT: BlockHeight = BlockHeight(3); + #[cfg(feature = "abcipp")] + const INVALID_HEIGHT: BlockHeight = BlockHeight(LAST_HEIGHT.0 - 1); + #[cfg(not(feature = "abcipp"))] + const INVALID_HEIGHT: BlockHeight = BlockHeight(LAST_HEIGHT.0 + 1); + let (mut shell, _recv, _) = test_utils::setup(); + shell.storage.last_height = LAST_HEIGHT; + let (protocol_key, _, _) = wallet::defaults::validator_keys(); + let addr = wallet::defaults::validator_address(); + let event = EthereumEvent::TransfersToNamada { + nonce: 1u64.into(), + transfers: vec![], + }; + let ext = { + #[allow(clippy::redundant_clone)] + let ext = ethereum_events::Vext { + validator_addr: addr.clone(), + block_height: INVALID_HEIGHT, + ethereum_events: vec![event.clone()], + } + .sign(&protocol_key); + assert!(ext.verify(&protocol_key.ref_to()).is_ok()); + ext + }; + #[cfg(feature = "abcipp")] + { + let vote_extension_digest = ethereum_events::VextDigest { + signatures: { + let mut s = HashMap::new(); + s.insert((addr.clone(), INVALID_HEIGHT), ext.sig); + s + }, + events: vec![MultiSignedEthEvent { + event, + signers: { + let mut s = BTreeSet::new(); + s.insert((addr, INVALID_HEIGHT)); + s + }, + }], + }; + check_rejected_eth_events_digest( + &mut shell, + vote_extension_digest, + protocol_key, + ); + } + #[cfg(not(feature = "abcipp"))] + { + check_rejected_eth_events(&mut shell, ext, protocol_key); + } + } + + /// Test that if a proposal contains Ethereum events with + /// invalid validators, we reject it. 
+ #[test] + fn test_drop_vext_with_invalid_validators() { + const LAST_HEIGHT: BlockHeight = BlockHeight(2); + let (mut shell, _recv, _) = test_utils::setup(); + shell.storage.last_height = LAST_HEIGHT; + let (addr, protocol_key) = { + let bertha_key = wallet::defaults::bertha_keypair(); + let bertha_addr = wallet::defaults::bertha_address(); + (bertha_addr, bertha_key) + }; + let event = EthereumEvent::TransfersToNamada { + nonce: 1u64.into(), + transfers: vec![], + }; + let ext = { + #[allow(clippy::redundant_clone)] + let ext = ethereum_events::Vext { + validator_addr: addr.clone(), + block_height: LAST_HEIGHT, + ethereum_events: vec![event.clone()], + } + .sign(&protocol_key); + assert!(ext.verify(&protocol_key.ref_to()).is_ok()); + ext + }; + #[cfg(feature = "abcipp")] + { + let vote_extension_digest = ethereum_events::VextDigest { + signatures: { + let mut s = HashMap::new(); + s.insert((addr.clone(), LAST_HEIGHT), ext.sig); + s + }, + events: vec![MultiSignedEthEvent { + event, + signers: { + let mut s = BTreeSet::new(); + s.insert((addr, LAST_HEIGHT)); + s + }, + }], + }; + check_rejected_eth_events_digest( + &mut shell, + vote_extension_digest, + protocol_key, + ); + } + #[cfg(not(feature = "abcipp"))] + { + check_rejected_eth_events(&mut shell, ext, protocol_key); + } + } /// Test that if a wrapper tx is not signed, it is rejected /// by [`process_proposal`]. #[test] fn test_unsigned_wrapper_rejected() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _recv, _) = test_utils::setup_at_height(3u64); let keypair = gen_keypair(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), @@ -237,20 +991,36 @@ mod test_process_proposal { Some(TxType::Wrapper(wrapper).try_to_vec().expect("Test failed")), ) .to_bytes(); - #[allow(clippy::redundant_clone)] - let request = ProcessProposal { - txs: vec![tx.clone()], - }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") + #[cfg(feature = "abcipp")] + let response = { + let request = ProcessProposal { + txs: vec![tx, get_empty_eth_ev_digest(&shell)], + }; + if let [resp, _] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed"); + } }; + #[cfg(not(feature = "abcipp"))] + let response = { + let request = ProcessProposal { txs: vec![tx] }; + if let [resp] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed") + } + }; + assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidSig)); assert_eq!( response.result.info, @@ -261,7 +1031,7 @@ mod test_process_proposal { /// Test that a wrapper tx with invalid signature is rejected #[test] fn test_wrapper_bad_signature_rejected() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _recv, _) = test_utils::setup_at_height(3u64); let keypair = gen_keypair(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), @@ -318,17 +1088,36 @@ mod test_process_proposal { } else { panic!("Test failed"); }; - let request = ProcessProposal { - txs: vec![new_tx.to_bytes()], + #[cfg(feature = "abcipp")] + let response = { + let request = ProcessProposal { + txs: vec![new_tx.to_bytes(), get_empty_eth_ev_digest(&shell)], + }; + + if let [resp, _] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed"); + } }; - let response = if let [response] = shell - 
.process_proposal(request) - .expect("Test failed") - .as_slice() - { - response.clone() - } else { - panic!("Test failed") + #[cfg(not(feature = "abcipp"))] + let response = { + let request = ProcessProposal { + txs: vec![new_tx.to_bytes()], + }; + if let [resp] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed") + } }; let expected_error = "Signature verification failed: Invalid signature"; assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidSig)); @@ -344,8 +1133,8 @@ mod test_process_proposal { /// non-zero, [`process_proposal`] rejects that tx #[test] fn test_wrapper_unknown_address() { - let (mut shell, _) = TestShell::new(); - let keypair = crate::wallet::defaults::keys().remove(0).1; + let (mut shell, _recv, _) = test_utils::setup_at_height(3u64); + let keypair = gen_keypair(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), @@ -363,17 +1152,35 @@ mod test_process_proposal { ) .sign(&keypair) .expect("Test failed"); - let request = ProcessProposal { - txs: vec![wrapper.to_bytes()], + #[cfg(feature = "abcipp")] + let response = { + let request = ProcessProposal { + txs: vec![wrapper.to_bytes(), get_empty_eth_ev_digest(&shell)], + }; + if let [resp, _] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed"); + } }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") + #[cfg(not(feature = "abcipp"))] + let response = { + let request = ProcessProposal { + txs: vec![wrapper.to_bytes()], + }; + if let [resp] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed") + } }; assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); assert_eq!( @@ -388,15 +1195,7 @@ mod test_process_proposal { /// [`process_proposal`] rejects that tx #[test] fn test_wrapper_insufficient_balance_address() { - let (mut shell, _) = TestShell::new(); - shell.init_chain(RequestInitChain { - time: Some(Timestamp { - seconds: 0, - nanos: 0, - }), - chain_id: ChainId::default().to_string(), - ..Default::default() - }); + let (mut shell, _recv, _) = test_utils::setup_at_height(3u64); let keypair = crate::wallet::defaults::daewon_keypair(); let tx = Tx::new( @@ -405,7 +1204,7 @@ mod test_process_proposal { ); let wrapper = WrapperTx::new( Fee { - amount: Amount::whole(1_000_100), + amount: token::Amount::whole(1_000_100), token: shell.storage.native_token.clone(), }, &keypair, @@ -417,18 +1216,35 @@ mod test_process_proposal { .sign(&keypair) .expect("Test failed"); - let request = ProcessProposal { - txs: vec![wrapper.to_bytes()], + #[cfg(feature = "abcipp")] + let response = { + let request = ProcessProposal { + txs: vec![wrapper.to_bytes(), get_empty_eth_ev_digest(&shell)], + }; + if let [resp, _] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed"); + } }; - - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") + #[cfg(not(feature = "abcipp"))] + let response = { + let request = ProcessProposal { + txs: vec![wrapper.to_bytes()], + }; + if let [resp] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + 
resp.clone() + } else { + panic!("Test failed") + } }; assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); assert_eq!( @@ -443,7 +1259,7 @@ mod test_process_proposal { /// validated, [`process_proposal`] rejects it #[test] fn test_decrypted_txs_out_of_order() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _recv, _) = test_utils::setup_at_height(3u64); let keypair = gen_keypair(); let mut txs = vec![]; for i in 0..3 { @@ -465,38 +1281,46 @@ mod test_process_proposal { shell.enqueue_tx(wrapper); txs.push(Tx::from(TxType::Decrypted(DecryptedTx::Decrypted(tx)))); } - let req_1 = ProcessProposal { - txs: vec![txs[0].to_bytes()], - }; - let response_1 = if let [resp] = shell - .process_proposal(req_1) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response_1.result.code, u32::from(ErrorCodes::Ok)); - - let req_2 = ProcessProposal { - txs: vec![txs[2].to_bytes()], + #[cfg(feature = "abcipp")] + let response = { + let request = ProcessProposal { + txs: vec![ + txs[0].to_bytes(), + txs[2].to_bytes(), + txs[1].to_bytes(), + get_empty_eth_ev_digest(&shell), + ], + }; + if let Err(TestError::RejectProposal(mut resp)) = + shell.process_proposal(request) + { + assert_eq!(resp.len(), 4); + resp.remove(1) + } else { + panic!("Test failed") + } }; - - let response_2 = if let Err(TestError::RejectProposal(resp)) = - shell.process_proposal(req_2) - { - if let [resp] = resp.as_slice() { - resp.clone() + #[cfg(not(feature = "abcipp"))] + let response = { + let request = ProcessProposal { + txs: vec![ + txs[0].to_bytes(), + txs[2].to_bytes(), + txs[1].to_bytes(), + ], + }; + if let Err(TestError::RejectProposal(mut resp)) = + shell.process_proposal(request) + { + assert_eq!(resp.len(), 3); + resp.remove(1) } else { panic!("Test failed") } - } else { - panic!("Test failed") }; - assert_eq!(response_2.result.code, u32::from(ErrorCodes::InvalidOrder)); + assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidOrder)); assert_eq!( - response_2.result.info, + response.result.info, String::from( "Process proposal rejected a decrypted transaction that \ violated the tx order determined in the previous block" @@ -508,7 +1332,7 @@ mod test_process_proposal { /// is rejected by [`process_proposal`] #[test] fn test_incorrectly_labelled_as_undecryptable() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _recv, _) = test_utils::setup_at_height(3u64); let keypair = gen_keypair(); let tx = Tx::new( @@ -531,18 +1355,35 @@ mod test_process_proposal { let tx = Tx::from(TxType::Decrypted(DecryptedTx::Undecryptable(wrapper))); - let request = ProcessProposal { - txs: vec![tx.to_bytes()], + #[cfg(feature = "abcipp")] + let response = { + let request = ProcessProposal { + txs: vec![tx.to_bytes(), get_empty_eth_ev_digest(&shell)], + }; + if let [resp, _] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed"); + } }; - - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") + #[cfg(not(feature = "abcipp"))] + let response = { + let request = ProcessProposal { + txs: vec![tx.to_bytes()], + }; + if let [resp] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed") + } }; assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); assert_eq!( @@ -559,15 +1400,7 @@ mod 
test_process_proposal { /// undecryptable but still accepted #[test] fn test_invalid_hash_commitment() { - let (mut shell, _) = TestShell::new(); - shell.init_chain(RequestInitChain { - time: Some(Timestamp { - seconds: 0, - nanos: 0, - }), - chain_id: ChainId::default().to_string(), - ..Default::default() - }); + let (mut shell, _recv, _) = test_utils::setup_at_height(3u64); let keypair = crate::wallet::defaults::daewon_keypair(); let tx = Tx::new( @@ -593,17 +1426,35 @@ mod test_process_proposal { wrapper.clone(), ))); - let request = ProcessProposal { - txs: vec![tx.to_bytes()], + #[cfg(feature = "abcipp")] + let response = { + let request = ProcessProposal { + txs: vec![tx.to_bytes(), get_empty_eth_ev_digest(&shell)], + }; + if let [resp, _] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed"); + } }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") + #[cfg(not(feature = "abcipp"))] + let response = { + let request = ProcessProposal { + txs: vec![tx.to_bytes()], + }; + if let [resp] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed") + } }; assert_eq!(response.result.code, u32::from(ErrorCodes::Ok)); } @@ -613,15 +1464,7 @@ mod test_process_proposal { /// marked undecryptable and the errors handled correctly #[test] fn test_undecryptable() { - let (mut shell, _) = TestShell::new(); - shell.init_chain(RequestInitChain { - time: Some(Timestamp { - seconds: 0, - nanos: 0, - }), - chain_id: ChainId::default().to_string(), - ..Default::default() - }); + let (mut shell, _recv, _) = test_utils::setup_at_height(3u64); let keypair = crate::wallet::defaults::daewon_keypair(); let pubkey = EncryptionKey::default(); // not valid tx bytes @@ -644,17 +1487,35 @@ mod test_process_proposal { #[allow(clippy::redundant_clone)] wrapper.clone(), ))); - let request = ProcessProposal { - txs: vec![signed.to_bytes()], + #[cfg(feature = "abcipp")] + let response = { + let request = ProcessProposal { + txs: vec![signed.to_bytes(), get_empty_eth_ev_digest(&shell)], + }; + if let [resp, _] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed"); + } }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") + #[cfg(not(feature = "abcipp"))] + let response = { + let request = ProcessProposal { + txs: vec![signed.to_bytes()], + }; + if let [resp] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed") + } }; assert_eq!(response.result.code, u32::from(ErrorCodes::Ok)); } @@ -663,7 +1524,7 @@ mod test_process_proposal { /// [`process_proposal`] than expected, they are rejected #[test] fn test_too_many_decrypted_txs() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _recv, _) = test_utils::setup_at_height(3u64); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), @@ -696,24 +1557,42 @@ mod test_process_proposal { /// Process Proposal should reject a RawTx, but not panic #[test] fn test_raw_tx_rejected() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _recv, _) = test_utils::setup_at_height(3u64); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction 
data".as_bytes().to_owned()), ); let tx = Tx::from(TxType::Raw(tx)); - let request = ProcessProposal { - txs: vec![tx.to_bytes()], + #[cfg(feature = "abcipp")] + let response = { + let request = ProcessProposal { + txs: vec![tx.to_bytes(), get_empty_eth_ev_digest(&shell)], + }; + if let [resp, _] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed"); + } }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") + #[cfg(not(feature = "abcipp"))] + let response = { + let request = ProcessProposal { + txs: vec![tx.to_bytes()], + }; + if let [resp] = shell + .process_proposal(request) + .expect("Test failed") + .as_slice() + { + resp.clone() + } else { + panic!("Test failed") + } }; assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); assert_eq!( @@ -724,4 +1603,74 @@ mod test_process_proposal { ), ); } + + /// Test if we reject wrapper txs when they shouldn't be included in blocks. + /// + /// Currently, the conditions to reject wrapper + /// txs are simply to check if we are at the 2nd + /// or 3rd height offset within an epoch. + #[test] + fn test_include_only_protocol_txs() { + let (mut shell, _recv, _) = test_utils::setup_at_height(1u64); + let keypair = gen_keypair(); + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some(b"transaction data".to_vec()), + ); + let wrapper = WrapperTx::new( + Fee { + amount: 1234.into(), + token: shell.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + ) + .sign(&keypair) + .expect("Test failed") + .to_bytes(); + for height in [1u64, 2] { + shell.storage.last_height = height.into(); + #[cfg(feature = "abcipp")] + let response = { + let request = ProcessProposal { + txs: vec![wrapper.clone(), get_empty_eth_ev_digest(&shell)], + }; + if let Err(TestError::RejectProposal(mut resp)) = + shell.process_proposal(request) + { + assert_eq!(resp.len(), 2); + resp.remove(0) + } else { + panic!("Test failed") + } + }; + #[cfg(not(feature = "abcipp"))] + let response = { + let request = ProcessProposal { + txs: vec![wrapper.clone()], + }; + if let Err(TestError::RejectProposal(mut resp)) = + shell.process_proposal(request) + { + assert_eq!(resp.len(), 1); + resp.remove(0) + } else { + panic!("Test failed") + } + }; + assert_eq!( + response.result.code, + u32::from(ErrorCodes::AllocationError) + ); + assert_eq!( + response.result.info, + String::from( + "Wrapper txs not allowed at the current block height" + ), + ); + } + } } diff --git a/apps/src/lib/node/ledger/shell/queries.rs b/apps/src/lib/node/ledger/shell/queries.rs index 77715246ab..e5df012af9 100644 --- a/apps/src/lib/node/ledger/shell/queries.rs +++ b/apps/src/lib/node/ledger/shell/queries.rs @@ -1,13 +1,11 @@ //! 
Shell methods for querying state -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; use ferveo_common::TendermintValidator; use namada::ledger::pos::into_tm_voting_power; use namada::ledger::queries::{RequestCtx, ResponseQuery}; -use namada::ledger::storage_api; -use namada::types::address::Address; +use namada::types::key; use namada::types::key::dkg_session_keys::DkgPublicKey; -use namada::types::{key, token}; use super::*; use crate::node::ledger::response; @@ -62,29 +60,11 @@ where } } - /// Simple helper function for the ledger to get balances - /// of the specified token at the specified address - pub fn get_balance( - &self, - token: &Address, - owner: &Address, - ) -> token::Amount { - let balance = storage_api::StorageRead::read( - &self.storage, - &token::balance_key(token, owner), - ); - // Storage read must not fail, but there might be no value, in which - // case default (0) is returned - balance - .expect("Storage read in the protocol must not fail") - .unwrap_or_default() - } - /// Lookup data about a validator from their protocol signing key #[allow(dead_code)] pub fn get_validator_from_protocol_pk( &self, - pk: &key::common::PublicKey, + pk: &common::PublicKey, ) -> Option> { let pk_bytes = pk .try_to_vec() @@ -134,3 +114,122 @@ where }) } } + +// NOTE: we are testing `namada::ledger::queries_ext`, +// which is not possible from `namada` since we do not have +// access to the `Shell` there +#[cfg(test)] +#[cfg(not(feature = "abcipp"))] +mod test_queries { + use namada::ledger::pos::{PosQueries, SendValsetUpd}; + use namada::types::storage::Epoch; + + use super::*; + use crate::node::ledger::shell::test_utils; + use crate::node::ledger::shims::abcipp_shim_types::shim::request::FinalizeBlock; + + macro_rules! test_can_send_validator_set_update { + (epoch_assertions: $epoch_assertions:expr $(,)?) => { + /// Test if [`QueriesExt::can_send_validator_set_update`] behaves as + /// expected. + #[test] + fn test_can_send_validator_set_update() { + let (mut shell, _recv, _) = test_utils::setup_at_height(0u64); + + let epoch_assertions = $epoch_assertions; + + // test `SendValsetUpd::Now` and `SendValsetUpd::AtPrevHeight` + for (curr_epoch, curr_block_height, can_send) in + epoch_assertions + { + shell.storage.last_height = + BlockHeight(curr_block_height - 1); + assert_eq!( + curr_block_height, + shell.storage.get_current_decision_height().0 + ); + assert_eq!( + shell.storage.get_epoch(curr_block_height.into()), + Some(Epoch(curr_epoch)) + ); + assert_eq!( + shell + .storage + .can_send_validator_set_update(SendValsetUpd::Now), + can_send, + ); + // TODO(feature = "abcipp"): test + // `SendValsetUpd::AtPrevHeight`; `idx` is the value + // of the current index being iterated over + // the array `epoch_assertions` + // + // ```ignore + // if let Some((epoch, height, can_send)) = + // epoch_assertions.get(_idx.wrapping_sub(1)).copied() + // { + // assert_eq!( + // shell.storage.get_epoch(height.into()), + // Some(Epoch(epoch)) + // ); + // assert_eq!( + // shell.storage.can_send_validator_set_update( + // SendValsetUpd::AtPrevHeight + // ), + // can_send, + // ); + // } + // ``` + let time = namada::types::time::DateTimeUtc::now(); + let mut req = FinalizeBlock::default(); + req.header.time = time; + shell.finalize_block(req).expect("Test failed"); + shell.commit(); + shell.storage.next_epoch_min_start_time = time; + } + } + }; + } + + #[cfg(feature = "abcipp")] + test_can_send_validator_set_update! 
{ + // TODO(feature = "abcipp"): add some epoch assertions + epoch_assertions: [] + } + + #[cfg(not(feature = "abcipp"))] + test_can_send_validator_set_update! { + epoch_assertions: [ + // (current epoch, current block height, can send valset upd) + (0, 1, false), + (0, 2, true), + (0, 3, false), + (0, 4, false), + (0, 5, false), + (0, 6, false), + (0, 7, false), + (0, 8, false), + (0, 9, false), + // we will change epoch here + (0, 10, false), + (1, 11, true), + (1, 12, false), + (1, 13, false), + (1, 14, false), + (1, 15, false), + (1, 16, false), + (1, 17, false), + (1, 18, false), + (1, 19, false), + // we will change epoch here + (1, 20, false), + (2, 21, true), + (2, 22, false), + (2, 23, false), + (2, 24, false), + (2, 25, false), + (2, 26, false), + (2, 27, false), + (2, 28, false), + ], + } +} diff --git a/apps/src/lib/node/ledger/shell/vote_extensions.rs b/apps/src/lib/node/ledger/shell/vote_extensions.rs new file mode 100644 index 0000000000..99402b5f60 --- /dev/null +++ b/apps/src/lib/node/ledger/shell/vote_extensions.rs @@ -0,0 +1,388 @@ +//! Extend Tendermint votes with Ethereum bridge logic. + +pub mod eth_events; +pub mod val_set_update; + +#[cfg(feature = "abcipp")] +use borsh::BorshDeserialize; +#[cfg(not(feature = "abcipp"))] +use index_set::vec::VecIndexSet; +use namada::ledger::pos::{PosQueries, SendValsetUpd}; +use namada::proto::Signed; +use namada::types::transaction::protocol::ProtocolTxType; +#[cfg(feature = "abcipp")] +use namada::types::vote_extensions::VoteExtensionDigest; +use namada::types::vote_extensions::{ + ethereum_events, validator_set_update, VoteExtension, +}; + +use super::*; +#[cfg(feature = "abcipp")] +use crate::facade::tendermint_proto::abci::ExtendedVoteInfo; +#[cfg(not(feature = "abcipp"))] +use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes; + +/// Message to be passed to `.expect()` calls in this module. +const VALIDATOR_EXPECT_MSG: &str = "Only validators receive this method call."; + +/// The error yielded from validating faulty vote extensions in the shell +#[derive(Error, Debug)] +pub enum VoteExtensionError { + #[error("The vote extension was issued at block height 0.")] + IssuedAtGenesis, + #[error("The vote extension was issued for an unexpected block height.")] + UnexpectedBlockHeight, + #[error("The vote extension was issued for an unexpected epoch.")] + UnexpectedEpoch, + #[error( + "The vote extension contains duplicate or non-sorted Ethereum events." + )] + HaveDupesOrNonSorted, + #[error( + "The public key of the vote extension's associated validator could \ + not be found in storage." + )] + PubKeyNotInStorage, + #[error("The vote extension's signature is invalid.")] + VerifySigFailed, + #[error( + "Validator is missing from an expected field in the vote extension." + )] + ValidatorMissingFromExtension, + #[error( + "Found value for a field in the vote extension diverging from the \ + equivalent field in storage." + )] + DivergesFromStorage, +} + +impl Shell +where + D: DB + for<'iter> DBIter<'iter> + Sync + 'static, + H: StorageHasher + Sync + 'static, +{ + /// The ExtendVote ABCI++ method implementation. + /// + /// INVARIANT: This method must be stateless. + #[cfg(feature = "abcipp")] + #[inline] + pub fn extend_vote( + &mut self, + _req: request::ExtendVote, + ) -> response::ExtendVote { + response::ExtendVote { + vote_extension: self.craft_extension().try_to_vec().unwrap(), + } + } + + /// Creates the data to be added to a vote extension. + /// + /// INVARIANT: This method must be stateless. 
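> Editorial aside: `extend_vote` above hands the extension to Tendermint as raw Borsh bytes, and `verify_vote_extension` below parses them back with `try_from_slice`. A minimal, self-contained round-trip sketch of that wire contract, using a simplified stand-in payload (`DemoVoteExtension` is illustrative, not a type from this patch; the `try_to_vec` / `try_from_slice` calls match the borsh 0.9-era API used here):

```rust
use borsh::{BorshDeserialize, BorshSerialize};

// Simplified stand-in for the real VoteExtension payload; the actual
// type carries signed Ethereum events and an optional valset update.
#[derive(BorshSerialize, BorshDeserialize, Debug, PartialEq)]
struct DemoVoteExtension {
    ethereum_events: Vec<u64>,
    validator_set_update: Option<u64>,
}

fn main() {
    let ext = DemoVoteExtension {
        ethereum_events: vec![1, 2, 3],
        validator_set_update: None,
    };
    // extend_vote serializes the extension into the ABCI response...
    let wire_bytes = ext.try_to_vec().expect("serialization cannot fail");
    // ...and verify_vote_extension deserializes it on the receiving side.
    let decoded = DemoVoteExtension::try_from_slice(&wire_bytes)
        .expect("round-trip must succeed");
    assert_eq!(ext, decoded);
}
```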
+ #[inline] + pub fn craft_extension(&mut self) -> VoteExtension { + VoteExtension { + ethereum_events: self.extend_vote_with_ethereum_events(), + validator_set_update: self.extend_vote_with_valset_update(), + } + } + + /// Extend PreCommit votes with [`ethereum_events::Vext`] instances. + pub fn extend_vote_with_ethereum_events( + &mut self, + ) -> Signed { + let validator_addr = self + .mode + .get_validator_address() + .expect(VALIDATOR_EXPECT_MSG) + .to_owned(); + + let ext = ethereum_events::Vext { + #[cfg(feature = "abcipp")] + block_height: self.storage.get_current_decision_height(), + #[cfg(not(feature = "abcipp"))] + block_height: self.storage.last_height, + ethereum_events: self.new_ethereum_events(), + validator_addr, + }; + if !ext.ethereum_events.is_empty() { + tracing::info!( + new_ethereum_events.len = ext.ethereum_events.len(), + ?ext.block_height, + "Voting for new Ethereum events" + ); + tracing::debug!("New Ethereum events - {:#?}", ext.ethereum_events); + } + + let protocol_key = match &self.mode { + ShellMode::Validator { data, .. } => &data.keys.protocol_keypair, + _ => unreachable!("{VALIDATOR_EXPECT_MSG}"), + }; + + ext.sign(protocol_key) + } + + /// Extend PreCommit votes with [`validator_set_update::Vext`] + /// instances. + pub fn extend_vote_with_valset_update( + &mut self, + ) -> Option { + let validator_addr = self + .mode + .get_validator_address() + .expect(VALIDATOR_EXPECT_MSG) + .to_owned(); + + self.storage + .can_send_validator_set_update(SendValsetUpd::Now) + .then(|| { + let next_epoch = self.storage.get_current_epoch().0.next(); + let voting_powers = self + .storage + .get_active_eth_addresses(Some(next_epoch)) + .map(|(eth_addr_book, _, voting_power)| { + (eth_addr_book, voting_power) + }) + .collect(); + + let ext = validator_set_update::Vext { + validator_addr, + voting_powers, + #[cfg(feature = "abcipp")] + block_height: self.storage.get_current_decision_height(), + #[cfg(not(feature = "abcipp"))] + block_height: self.storage.last_height, + }; + + let eth_key = match &self.mode { + ShellMode::Validator { data, .. } => { + &data.keys.eth_bridge_keypair + } + _ => unreachable!("{VALIDATOR_EXPECT_MSG}"), + }; + + ext.sign(eth_key) + }) + } + + /// The VerifyVoteExtension ABCI++ method. + /// + /// This checks that the vote extension: + /// * Correctly deserializes. + /// * The Ethereum events vote extension within was correctly signed by an + /// active validator. + /// * The validator set update vote extension within was correctly signed by + /// an active validator, in case it could have been sent at the current + /// block height. + /// * The Ethereum events vote extension block height signed over is correct + /// (for replay protection). + /// * The validator set update vote extension block height signed over is + /// correct (for replay protection). + /// + /// INVARIANT: This method must be stateless. 
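> Editorial aside: `extend_vote_with_valset_update` above leans on the `bool::then` idiom, where a validator set update extension is produced only when `can_send_validator_set_update(SendValsetUpd::Now)` holds, and `None` is returned otherwise. A self-contained illustration of that idiom (the string payload is a placeholder):

```rust
// bool::then turns a protocol-level predicate into an Option: Some(ext)
// when a valset update may be sent now, None otherwise.
fn maybe_extension(can_send_now: bool) -> Option<&'static str> {
    can_send_now.then(|| "signed valset update vext")
}

fn main() {
    assert_eq!(maybe_extension(true), Some("signed valset update vext"));
    assert_eq!(maybe_extension(false), None);
}
```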
+ #[cfg(feature = "abcipp")] + pub fn verify_vote_extension( + &self, + req: request::VerifyVoteExtension, + ) -> response::VerifyVoteExtension { + use crate::facade::tendermint_proto::abci::response_verify_vote_extension::VerifyStatus; + + let ext = match VoteExtension::try_from_slice(&req.vote_extension[..]) { + Ok(ext) => ext, + Err(err) => { + tracing::warn!( + ?err, + ?req.validator_address, + ?req.hash, + req.height, + "Received undeserializable vote extension" + ); + return response::VerifyVoteExtension { + status: VerifyStatus::Reject.into(), + }; + } + }; + + let validated_eth_events = + self.verify_ethereum_events(&req, ext.ethereum_events); + let validated_valset_upd = + self.verify_valset_update(&req, ext.validator_set_update); + + response::VerifyVoteExtension { + status: if validated_eth_events && validated_valset_upd { + VerifyStatus::Accept.into() + } else { + VerifyStatus::Reject.into() + }, + } + } + + /// Check if [`ethereum_events::Vext`] instances are valid. + #[cfg(feature = "abcipp")] + pub fn verify_ethereum_events( + &self, + req: &request::VerifyVoteExtension, + ext: Signed, + ) -> bool { + self.validate_eth_events_vext( + ext, + self.storage.get_current_decision_height(), + ) + .then_some(true) + .unwrap_or_else(|| { + tracing::warn!( + ?req.validator_address, + ?req.hash, + req.height, + "Received Ethereum events vote extension that didn't validate" + ); + false + }) + } + + /// Check if [`validator_set_update::Vext`] instances are valid. + #[cfg(feature = "abcipp")] + pub fn verify_valset_update( + &self, + req: &request::VerifyVoteExtension, + ext: Option, + ) -> bool { + if let Some(ext) = ext { + self.storage + .can_send_validator_set_update(SendValsetUpd::Now) + .then(|| { + // we have a valset update vext when we're expecting one, + // cool, let's validate it + self.validate_valset_upd_vext( + ext, + self.storage.get_current_decision_height(), + ) + }) + .unwrap_or_else(|| { + // either validation failed, or we were expecting a valset + // update vext and got none + tracing::warn!( + ?req.validator_address, + ?req.hash, + req.height, + "Missing or invalid validator set update vote extension" + ); + false + }) + } else { + // NOTE: if we're not supposed to send a validator set update + // vote extension at a particular block height, we will + // just return true as the validation result + true + } + } +} + +/// Given a `Vec` of [`ExtendedVoteInfo`], return an iterator over the +/// ones we could deserialize to [`VoteExtension`] +/// instances. +#[cfg(feature = "abcipp")] +pub fn deserialize_vote_extensions( + vote_extensions: Vec, +) -> impl Iterator + 'static { + vote_extensions.into_iter().filter_map(|vote| { + VoteExtension::try_from_slice(&vote.vote_extension[..]) + .map_err(|err| { + tracing::error!( + ?err, + "Failed to deserialize data as a VoteExtension", + ); + }) + .ok() + }) +} + +/// Given a slice of [`TxBytes`], return an iterator over the +/// ones we could deserialize to vote extension [`ProtocolTx`] +/// instances. +#[cfg(not(feature = "abcipp"))] +pub fn deserialize_vote_extensions<'shell>( + txs: &'shell [TxBytes], + protocol_tx_indices: &'shell mut VecIndexSet, +) -> impl Iterator + 'shell { + use namada::types::transaction::protocol::ProtocolTx; + + txs.iter().enumerate().filter_map(|(index, tx_bytes)| { + let tx = match Tx::try_from(tx_bytes.as_slice()) { + Ok(tx) => tx, + Err(err) => { + tracing::warn!( + ?err, + "Failed to deserialize tx in deserialize_vote_extensions" + ); + return None; + } + }; + match process_tx(tx).ok()? 
{ + TxType::Protocol(ProtocolTx { + tx: + ProtocolTxType::EthEventsVext(_) + | ProtocolTxType::ValSetUpdateVext(_), + .. + }) => { + // mark tx for inclusion + protocol_tx_indices.insert(index); + Some(tx_bytes.clone()) + } + _ => None, + } + }) +} + +/// Yields an iterator over the [`ProtocolTxType`] transactions +/// in a [`VoteExtensionDigest`]. +#[cfg(feature = "abcipp")] +pub fn iter_protocol_txs( + digest: VoteExtensionDigest, +) -> impl Iterator { + [ + Some(ProtocolTxType::EthereumEvents(digest.ethereum_events)), + digest + .validator_set_update + .map(ProtocolTxType::ValidatorSetUpdate), + ] + .into_iter() + .flatten() +} + +/// Yields an iterator over the [`ProtocolTxType`] transactions +/// in a [`VoteExtension`]. +#[cfg(not(feature = "abcipp"))] +pub fn iter_protocol_txs( + ext: VoteExtension, +) -> impl Iterator { + [ + Some(ProtocolTxType::EthEventsVext(ext.ethereum_events)), + ext.validator_set_update + .map(ProtocolTxType::ValSetUpdateVext), + ] + .into_iter() + .flatten() +} + +/// Deserializes `vote_extensions` as [`VoteExtension`] instances, filtering +/// out invalid data, and splits these into [`ethereum_events::Vext`] +/// and [`validator_set_update::Vext`] instances. +#[cfg(feature = "abcipp")] +pub fn split_vote_extensions( + vote_extensions: Vec, +) -> ( + Vec>, + Vec, +) { + let mut eth_evs = vec![]; + let mut valset_upds = vec![]; + + for ext in deserialize_vote_extensions(vote_extensions) { + if let Some(validator_set_update) = ext.validator_set_update { + valset_upds.push(validator_set_update); + } + eth_evs.push(ext.ethereum_events); + } + + (eth_evs, valset_upds) +} diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs b/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs new file mode 100644 index 0000000000..0deeb29cc1 --- /dev/null +++ b/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs @@ -0,0 +1,625 @@ +//! Extend Tendermint votes with Ethereum events seen by a quorum of validators. + +use std::collections::{BTreeMap, HashMap}; + +use namada::ledger::pos::PosQueries; +use namada::ledger::storage::traits::StorageHasher; +use namada::ledger::storage::{DBIter, DB}; +use namada::proto::Signed; +use namada::types::ethereum_events::EthereumEvent; +use namada::types::storage::BlockHeight; +use namada::types::token; +use namada::types::vote_extensions::ethereum_events::{ + self, MultiSignedEthEvent, +}; +#[cfg(feature = "abcipp")] +use namada::types::voting_power::FractionalVotingPower; + +use super::*; +use crate::node::ledger::shell::{Shell, ShellMode}; + +impl Shell +where + D: DB + for<'iter> DBIter<'iter> + Sync + 'static, + H: StorageHasher + Sync + 'static, +{ + /// Validates an Ethereum events vote extension issued at the provided + /// block height. + /// + /// Checks that at epoch of the provided height: + /// * The Tendermint address corresponds to an active validator. + /// * The validator correctly signed the extension. + /// * The validator signed over the correct height inside of the extension. + /// * There are no duplicate Ethereum events in this vote extension, and + /// the events are sorted in ascending order. + #[inline] + #[allow(dead_code)] + pub fn validate_eth_events_vext( + &self, + ext: Signed, + last_height: BlockHeight, + ) -> bool { + self.validate_eth_events_vext_and_get_it_back(ext, last_height) + .is_ok() + } + + /// This method behaves exactly like [`Self::validate_eth_events_vext`], + /// with the added bonus of returning the vote extension back, if it + /// is valid. 
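> Editorial aside before the implementation: the method below enforces two cfg-dependent replay-protection rules on the signed height. Under `abcipp` the extension must target exactly the current decision height; under the shim it may target any past height but never a future one, and genesis extensions are always dropped. A self-contained sketch of that rule (plain `u64` heights and `&str` errors are simplifying assumptions):

```rust
// Sketch of the replay-protection rule enforced below, for both flags.
fn check_ext_height(
    ext_height: u64,
    last_height: u64,
    abcipp: bool,
) -> Result<(), &'static str> {
    if abcipp && ext_height != last_height {
        // ABCI++: only the exact decision height is acceptable.
        return Err("unexpected block height");
    }
    if !abcipp && ext_height > last_height {
        // ABCI shim: past heights are fine, future ones are not.
        return Err("unexpected block height");
    }
    if last_height == 0 {
        return Err("issued at genesis");
    }
    Ok(())
}

fn main() {
    assert!(check_ext_height(3, 3, true).is_ok());
    assert!(check_ext_height(2, 3, true).is_err());
    assert!(check_ext_height(2, 3, false).is_ok());
    assert!(check_ext_height(4, 3, false).is_err());
}
```

This asymmetry is also why `test_drop_vext_with_invalid_bheights` earlier picks `LAST_HEIGHT - 1` as the invalid height under `abcipp` but `LAST_HEIGHT + 1` otherwise.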
+ pub fn validate_eth_events_vext_and_get_it_back( + &self, + ext: Signed, + last_height: BlockHeight, + ) -> std::result::Result< + (token::Amount, Signed), + VoteExtensionError, + > { + #[cfg(feature = "abcipp")] + if ext.data.block_height != last_height { + tracing::error!( + ext_height = ?ext.data.block_height, + ?last_height, + "Ethereum events vote extension issued for a block height \ + different from the expected last height." + ); + return Err(VoteExtensionError::UnexpectedBlockHeight); + } + #[cfg(not(feature = "abcipp"))] + if ext.data.block_height > last_height { + tracing::error!( + ext_height = ?ext.data.block_height, + ?last_height, + "Ethereum events vote extension issued for a block height \ + higher than the chain's last height." + ); + return Err(VoteExtensionError::UnexpectedBlockHeight); + } + if last_height.0 == 0 { + tracing::error!("Dropping vote extension issued at genesis"); + return Err(VoteExtensionError::IssuedAtGenesis); + } + // verify if we have any duplicate Ethereum events, + // and if these are sorted in ascending order + let have_dupes_or_non_sorted = { + !ext.data + .ethereum_events + // TODO: move to `array_windows` when it reaches Rust stable + .windows(2) + .all(|evs| evs[0] < evs[1]) + }; + let validator = &ext.data.validator_addr; + if have_dupes_or_non_sorted { + tracing::error!( + %validator, + "Found duplicate or non-sorted Ethereum events in a vote extension from \ + some validator" + ); + return Err(VoteExtensionError::HaveDupesOrNonSorted); + } + // get the public key associated with this validator + // + // NOTE(not(feature = "abciplus")): for ABCI++, we should pass + // `last_height` here, instead of `ext.data.block_height` + let ext_height_epoch = + match self.storage.get_epoch(ext.data.block_height) { + Some(epoch) => epoch, + _ => { + tracing::error!( + block_height = ?ext.data.block_height, + "The epoch of the Ethereum events vote extension's \ + block height should always be known", + ); + return Err(VoteExtensionError::UnexpectedEpoch); + } + }; + let (voting_power, pk) = self + .storage + .get_validator_from_address(validator, Some(ext_height_epoch)) + .map_err(|err| { + tracing::error!( + ?err, + %validator, + "Could not get public key from Storage for some validator, \ + while validating Ethereum events vote extension" + ); + VoteExtensionError::PubKeyNotInStorage + })?; + // verify the signature of the vote extension + ext.verify(&pk) + .map_err(|err| { + tracing::error!( + ?err, + ?ext.sig, + ?pk, + %validator, + "Failed to verify the signature of an Ethereum events vote \ + extension issued by some validator" + ); + VoteExtensionError::VerifySigFailed + }) + .map(|_| (voting_power, ext)) + } + + /// Checks the channel from the Ethereum oracle monitoring + /// the fullnode and retrieves all seen Ethereum events. + pub fn new_ethereum_events(&mut self) -> Vec { + match &mut self.mode { + ShellMode::Validator { + ref mut ethereum_recv, + .. + } => { + ethereum_recv.fill_queue(); + ethereum_recv.get_events() + } + _ => vec![], + } + } + + /// Takes an iterator over Ethereum events vote extension instances, + /// and returns another iterator. The latter yields + /// valid Ethereum events vote extensions, or the reason why these + /// are invalid, in the form of a [`VoteExtensionError`]. 
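> Editorial aside: the `windows(2)` check in `validate_eth_events_vext_and_get_it_back` above is doing double duty, since requiring strict `<` on every adjacent pair rules out both duplicates and out-of-order events in a single pass. A self-contained mirror of that predicate:

```rust
// Strict ascending order over adjacent pairs implies the slice is both
// sorted and duplicate-free; empty and single-element slices pass
// trivially, matching the behavior of windows(2).all(..).
fn is_strictly_ascending<T: Ord>(events: &[T]) -> bool {
    events.windows(2).all(|pair| pair[0] < pair[1])
}

fn main() {
    assert!(is_strictly_ascending(&[1, 2, 3]));
    assert!(!is_strictly_ascending(&[1, 1, 3])); // duplicate
    assert!(!is_strictly_ascending(&[2, 1, 3])); // not sorted
}
```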
+ #[inline] + pub fn validate_eth_events_vext_list<'iter>( + &'iter self, + vote_extensions: impl IntoIterator> + + 'iter, + ) -> impl Iterator< + Item = std::result::Result< + (token::Amount, Signed), + VoteExtensionError, + >, + > + 'iter { + vote_extensions.into_iter().map(|vote_extension| { + self.validate_eth_events_vext_and_get_it_back( + vote_extension, + self.storage.last_height, + ) + }) + } + + /// Takes a list of signed Ethereum events vote extensions, + /// and filters out invalid instances. + #[inline] + pub fn filter_invalid_eth_events_vexts<'iter>( + &'iter self, + vote_extensions: impl IntoIterator> + + 'iter, + ) -> impl Iterator)> + 'iter + { + self.validate_eth_events_vext_list(vote_extensions) + .filter_map(|ext| ext.ok()) + } + + /// Compresses a set of signed Ethereum events into a single + /// [`ethereum_events::VextDigest`], whilst filtering invalid + /// [`Signed`] instances in the process. + pub fn compress_ethereum_events( + &self, + vote_extensions: Vec>, + ) -> Option { + #[cfg(not(feature = "abcipp"))] + if self.storage.last_height == BlockHeight(0) { + return None; + } + + #[cfg(feature = "abcipp")] + let vexts_epoch = + self.storage.get_epoch(self.storage.last_height).expect( + "The epoch of the last block height should always be known", + ); + + #[cfg(feature = "abcipp")] + let total_voting_power = + u64::from(self.storage.get_total_voting_power(Some(vexts_epoch))); + #[cfg(feature = "abcipp")] + let mut voting_power = FractionalVotingPower::default(); + + let mut event_observers = BTreeMap::new(); + let mut signatures = HashMap::new(); + + for (_validator_voting_power, vote_extension) in + self.filter_invalid_eth_events_vexts(vote_extensions) + { + let validator_addr = vote_extension.data.validator_addr; + let block_height = vote_extension.data.block_height; + + // update voting power + #[cfg(feature = "abcipp")] + { + let validator_voting_power = u64::from(_validator_voting_power); + voting_power += FractionalVotingPower::new( + validator_voting_power, + total_voting_power, + ) + .expect( + "The voting power we obtain from storage should always be \ + valid", + ); + } + + // register all ethereum events seen by `validator_addr` + for ev in vote_extension.data.ethereum_events { + let signers = + event_observers.entry(ev).or_insert_with(BTreeSet::new); + signers.insert((validator_addr.clone(), block_height)); + } + + // register the signature of `validator_addr` + let addr = validator_addr.clone(); + let sig = vote_extension.sig; + + let key = (addr, block_height); + tracing::debug!( + ?key, + ?sig, + ?validator_addr, + "Inserting signature into ethereum_events::VextDigest" + ); + if let Some(existing_sig) = signatures.insert(key, sig.clone()) { + tracing::warn!( + ?sig, + ?existing_sig, + ?validator_addr, + "Overwrote old signature from validator while \ + constructing ethereum_events::VextDigest - maybe private \ + key of validator is being used by multiple nodes?" 
+ );
+ }
+ }
+
+ #[cfg(feature = "abcipp")]
+ if voting_power <= FractionalVotingPower::TWO_THIRDS {
+ tracing::error!(
+ "Tendermint has decided on a block including Ethereum events \
+ reflecting <= 2/3 of the total stake"
+ );
+ return None;
+ }
+
+ let events: Vec<MultiSignedEthEvent> = event_observers
+ .into_iter()
+ .map(|(event, signers)| MultiSignedEthEvent { event, signers })
+ .collect();
+
+ Some(ethereum_events::VextDigest { events, signatures })
+ }
+}
+
+#[cfg(test)]
+mod test_vote_extensions {
+ use std::convert::TryInto;
+
+ #[cfg(feature = "abcipp")]
+ use borsh::{BorshDeserialize, BorshSerialize};
+ use namada::ledger::pos;
+ use namada::ledger::pos::namada_proof_of_stake::PosBase;
+ use namada::ledger::pos::PosQueries;
+ use namada::types::ethereum_events::{
+ EthAddress, EthereumEvent, TransferToEthereum,
+ };
+ use namada::types::key::*;
+ use namada::types::storage::{BlockHeight, Epoch};
+ use namada::types::vote_extensions::ethereum_events;
+ #[cfg(feature = "abcipp")]
+ use namada::types::vote_extensions::VoteExtension;
+
+ #[cfg(feature = "abcipp")]
+ use crate::facade::tendermint_proto::abci::response_verify_vote_extension::VerifyStatus;
+ #[cfg(feature = "abcipp")]
+ use crate::facade::tower_abci::request;
+ use crate::node::ledger::shell::test_utils::*;
+ use crate::node::ledger::shims::abcipp_shim_types::shim::request::FinalizeBlock;
+
+ /// Test that we successfully receive Ethereum events
+ /// from the channel to the fullnode process.
+ ///
+ /// We further check that the ledger buffers events when several are
+ /// in the channel, and that they are queued and de-duplicated.
+ #[test]
+ fn test_get_eth_events() {
+ let (mut shell, _, oracle) = setup();
+ let event_1 = EthereumEvent::TransfersToEthereum {
+ nonce: 1.into(),
+ transfers: vec![TransferToEthereum {
+ amount: 100.into(),
+ asset: EthAddress([1; 20]),
+ receiver: EthAddress([2; 20]),
+ }],
+ };
+ let event_2 = EthereumEvent::TransfersToEthereum {
+ nonce: 2.into(),
+ transfers: vec![TransferToEthereum {
+ amount: 100.into(),
+ asset: EthAddress([1; 20]),
+ receiver: EthAddress([2; 20]),
+ }],
+ };
+ let event_3 = EthereumEvent::NewContract {
+ name: "Test".to_string(),
+ address: EthAddress([0; 20]),
+ };
+
+ tokio_test::block_on(oracle.send(event_1.clone()))
+ .expect("Test failed");
+ tokio_test::block_on(oracle.send(event_3.clone()))
+ .expect("Test failed");
+ let [event_first, event_second]: [EthereumEvent; 2] =
+ shell.new_ethereum_events().try_into().expect("Test failed");
+
+ assert_eq!(event_first, event_1);
+ assert_eq!(event_second, event_3);
+ // check that we queue and de-duplicate events
+ tokio_test::block_on(oracle.send(event_2.clone()))
+ .expect("Test failed");
+ tokio_test::block_on(oracle.send(event_3.clone()))
+ .expect("Test failed");
+ let [event_first, event_second, event_third]: [EthereumEvent; 3] =
+ shell.new_ethereum_events().try_into().expect("Test failed");
+
+ assert_eq!(event_first, event_1);
+ assert_eq!(event_second, event_2);
+ assert_eq!(event_third, event_3);
+ }
+
+ /// Test that Ethereum events are added to vote extensions.
+ /// Check that vote extensions pass verification.
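> Editorial aside before the next test, on the quorum check in `compress_ethereum_events` above: a digest is only emitted when the accumulated fractional voting power strictly exceeds two thirds of the total stake. A sketch of that threshold using integer arithmetic in place of `FractionalVotingPower` (a simplifying assumption):

```rust
// accumulated / total > 2 / 3, computed without floating point.
fn more_than_two_thirds(accumulated: u64, total: u64) -> bool {
    3 * accumulated > 2 * total
}

fn main() {
    assert!(!more_than_two_thirds(66, 100));
    assert!(!more_than_two_thirds(2, 3)); // exactly 2/3 is not enough
    assert!(more_than_two_thirds(67, 100));
}
```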
+ #[cfg(feature = "abcipp")] + #[tokio::test] + async fn test_eth_events_vote_extension() { + let (mut shell, _, oracle) = setup(); + let address = shell + .mode + .get_validator_address() + .expect("Test failed") + .clone(); + let event_1 = EthereumEvent::TransfersToEthereum { + nonce: 1.into(), + transfers: vec![TransferToEthereum { + amount: 100.into(), + asset: EthAddress([1; 20]), + receiver: EthAddress([2; 20]), + }], + }; + let event_2 = EthereumEvent::NewContract { + name: "Test".to_string(), + address: EthAddress([0; 20]), + }; + oracle.send(event_1.clone()).await.expect("Test failed"); + oracle.send(event_2.clone()).await.expect("Test failed"); + let vote_extension = + ::try_from_slice( + &shell.extend_vote(Default::default()).vote_extension[..], + ) + .expect("Test failed"); + + let [event_first, event_second]: [EthereumEvent; 2] = vote_extension + .ethereum_events + .data + .ethereum_events + .clone() + .try_into() + .expect("Test failed"); + + assert_eq!(event_first, event_1); + assert_eq!(event_second, event_2); + let req = request::VerifyVoteExtension { + hash: vec![], + validator_address: address + .raw_hash() + .expect("Test failed") + .as_bytes() + .to_vec(), + height: 0, + vote_extension: vote_extension.try_to_vec().expect("Test failed"), + }; + let res = shell.verify_vote_extension(req); + assert_eq!(res.status, i32::from(VerifyStatus::Accept)); + } + + /// Test that Ethereum events signed by a non-validator are rejected + #[test] + fn test_eth_events_must_be_signed_by_validator() { + let (shell, _, _) = setup_at_height(3u64); + let signing_key = gen_keypair(); + let address = shell + .mode + .get_validator_address() + .expect("Test failed") + .clone(); + #[allow(clippy::redundant_clone)] + let ethereum_events = ethereum_events::Vext { + ethereum_events: vec![EthereumEvent::TransfersToEthereum { + nonce: 1.into(), + transfers: vec![TransferToEthereum { + amount: 100.into(), + asset: EthAddress([1; 20]), + receiver: EthAddress([2; 20]), + }], + }], + block_height: shell.storage.get_current_decision_height(), + validator_addr: address.clone(), + } + .sign(&signing_key); + #[cfg(feature = "abcipp")] + let req = request::VerifyVoteExtension { + hash: vec![], + validator_address: address + .raw_hash() + .expect("Test failed") + .as_bytes() + .to_vec(), + height: 0, + vote_extension: VoteExtension { + ethereum_events: ethereum_events.clone(), + validator_set_update: None, + } + .try_to_vec() + .expect("Test failed"), + }; + #[cfg(feature = "abcipp")] + assert_eq!( + shell.verify_vote_extension(req).status, + i32::from(VerifyStatus::Reject) + ); + assert!(!shell.validate_eth_events_vext( + ethereum_events, + shell.storage.get_current_decision_height(), + )) + } + + /// Test that validation of Ethereum events cast during the + /// previous block are accepted for the current block. This + /// should pass even if the epoch changed resulting in a + /// change to the validator set. 
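> Editorial aside: the test below passes because validation resolves the epoch from the extension's signed height, via `get_epoch(ext.data.block_height)` above, rather than from the chain's current epoch, so signatures from a superseded validator set still verify against that older set. A toy schedule for intuition (the 10-block epoch length is an assumption, chosen to match the `epoch_assertions` in the queries test earlier):

```rust
// Toy epoch resolution by signed height, assuming 10-block epochs.
fn epoch_of(height: u64) -> u64 {
    height.saturating_sub(1) / 10
}

fn main() {
    // An extension signed at height 3 is validated against epoch 0...
    assert_eq!(epoch_of(3), 0);
    // ...even if the chain tip has since moved into epoch 1.
    assert_eq!(epoch_of(11), 1);
}
```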
+ #[test] + fn test_validate_eth_events_vexts() { + let (mut shell, _recv, _) = setup_at_height(3u64); + let signing_key = + shell.mode.get_protocol_key().expect("Test failed").clone(); + let address = shell + .mode + .get_validator_address() + .expect("Test failed") + .clone(); + let signed_height = shell.storage.get_current_decision_height(); + let vote_ext = ethereum_events::Vext { + ethereum_events: vec![EthereumEvent::TransfersToEthereum { + nonce: 1.into(), + transfers: vec![TransferToEthereum { + amount: 100.into(), + asset: EthAddress([1; 20]), + receiver: EthAddress([2; 20]), + }], + }], + block_height: signed_height, + validator_addr: address, + } + .sign(shell.mode.get_protocol_key().expect("Test failed")); + + assert_eq!(shell.storage.get_current_epoch().0.0, 0); + // We make a change so that there are no + // validators in the next epoch + let mut current_validators = shell.storage.read_validator_set(); + current_validators.data.insert( + 1, + Some(pos::types::ValidatorSet { + active: Default::default(), + inactive: Default::default(), + }), + ); + shell.storage.write_validator_set(¤t_validators); + // we advance forward to the next epoch + let mut req = FinalizeBlock::default(); + req.header.time = namada::types::time::DateTimeUtc::now(); + shell.storage.last_height = BlockHeight(11); + shell.finalize_block(req).expect("Test failed"); + shell.commit(); + assert_eq!(shell.storage.get_current_epoch().0.0, 1); + assert!( + shell + .storage + .get_validator_from_protocol_pk(&signing_key.ref_to(), None) + .is_err() + ); + let prev_epoch = Epoch(shell.storage.get_current_epoch().0.0 - 1); + assert!( + shell + .shell + .storage + .get_validator_from_protocol_pk( + &signing_key.ref_to(), + Some(prev_epoch) + ) + .is_ok() + ); + + assert!(shell.validate_eth_events_vext(vote_ext, signed_height)); + } + + /// Test that an [`ethereum_events::Vext`] that incorrectly labels what + /// block it was included on in a vote extension is rejected + #[test] + fn reject_incorrect_block_number() { + let (shell, _, _) = setup_at_height(3u64); + let address = shell.mode.get_validator_address().unwrap().clone(); + #[allow(clippy::redundant_clone)] + let ethereum_events = ethereum_events::Vext { + ethereum_events: vec![EthereumEvent::TransfersToEthereum { + nonce: 1.into(), + transfers: vec![TransferToEthereum { + amount: 100.into(), + asset: EthAddress([1; 20]), + receiver: EthAddress([2; 20]), + }], + }], + block_height: shell.storage.last_height, + validator_addr: address.clone(), + } + .sign(shell.mode.get_protocol_key().expect("Test failed")); + + #[cfg(feature = "abcipp")] + { + let req = request::VerifyVoteExtension { + hash: vec![], + validator_address: address.try_to_vec().expect("Test failed"), + height: 0, + vote_extension: VoteExtension { + ethereum_events: ethereum_events.clone(), + validator_set_update: None, + } + .try_to_vec() + .expect("Test failed"), + }; + + assert_eq!( + shell.verify_vote_extension(req).status, + i32::from(VerifyStatus::Reject) + ); + } + assert!(shell.validate_eth_events_vext( + ethereum_events, + shell.storage.last_height + )) + } + + /// Test if we reject Ethereum events vote extensions + /// issued at genesis + #[test] + fn test_reject_genesis_vexts() { + let (shell, _, _) = setup(); + let address = shell.mode.get_validator_address().unwrap().clone(); + #[allow(clippy::redundant_clone)] + let vote_ext = ethereum_events::Vext { + ethereum_events: vec![EthereumEvent::TransfersToEthereum { + nonce: 1.into(), + transfers: vec![TransferToEthereum { + amount: 
100.into(), + asset: EthAddress([1; 20]), + receiver: EthAddress([2; 20]), + }], + }], + block_height: shell.storage.last_height, + validator_addr: address.clone(), + } + .sign(shell.mode.get_protocol_key().expect("Test failed")); + + #[cfg(feature = "abcipp")] + let req = request::VerifyVoteExtension { + hash: vec![], + validator_address: address.try_to_vec().expect("Test failed"), + height: 0, + vote_extension: vote_ext.try_to_vec().expect("Test failed"), + }; + #[cfg(feature = "abcipp")] + assert_eq!( + shell.verify_vote_extension(req).status, + i32::from(VerifyStatus::Reject) + ); + assert!( + !shell + .validate_eth_events_vext(vote_ext, shell.storage.last_height) + ) + } +} diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs b/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs new file mode 100644 index 0000000000..ea7548df50 --- /dev/null +++ b/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs @@ -0,0 +1,597 @@ +//! Extend Tendermint votes with validator set updates, to be relayed to +//! Namada's Ethereum bridge smart contracts. + +use std::collections::HashMap; + +use namada::ledger::pos::namada_proof_of_stake::PosBase; +use namada::ledger::pos::PosQueries; +use namada::ledger::storage::traits::StorageHasher; +use namada::ledger::storage::{DBIter, DB}; +use namada::types::storage::BlockHeight; +use namada::types::token; +use namada::types::vote_extensions::validator_set_update; +#[cfg(feature = "abcipp")] +use namada::types::voting_power::FractionalVotingPower; + +use super::*; +use crate::node::ledger::shell::Shell; + +impl Shell +where + D: DB + for<'iter> DBIter<'iter> + Sync + 'static, + H: StorageHasher + Sync + 'static, +{ + /// Validates a validator set update vote extension issued for the + /// succeeding epoch of the block height provided as an argument. + /// + /// Checks that: + /// * The signing validator was active at the preceding epoch. + /// * The validator correctly signed the extension, with its Ethereum hot + /// key. + /// * The validator signed over the block height inside of the extension. + /// * The voting powers in the vote extension correspond to the voting + /// powers of the validators of the new epoch. + /// * The voting powers are normalized to `2^32`, and sorted in descending + /// order. + #[inline] + #[allow(dead_code)] + pub fn validate_valset_upd_vext( + &self, + ext: validator_set_update::SignedVext, + last_height: BlockHeight, + ) -> bool { + self.validate_valset_upd_vext_and_get_it_back(ext, last_height) + .is_ok() + } + + /// This method behaves exactly like [`Self::validate_valset_upd_vext`], + /// with the added bonus of returning the vote extension back, if it + /// is valid. 
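> Editorial aside before the implementation: among the checks listed above, the method below cross-checks every (Ethereum address book, voting power) pair that storage reports for the next epoch against the map carried in the extension, rejecting on any missing or diverging entry. A self-contained sketch with stand-in types (`String` for the address book and `u64` for voting power are assumptions):

```rust
use std::collections::HashMap;

// Every active validator known to storage must appear in the extension
// with exactly the voting power storage reports.
fn powers_match(
    storage_powers: &[(String, u64)],
    ext_powers: &HashMap<String, u64>,
) -> bool {
    storage_powers
        .iter()
        .all(|(addr, power)| ext_powers.get(addr) == Some(power))
}

fn main() {
    let storage =
        vec![("validator-a".to_string(), 70), ("validator-b".to_string(), 30)];
    let mut ext = HashMap::new();
    ext.insert("validator-a".to_string(), 70);
    ext.insert("validator-b".to_string(), 30);
    assert!(powers_match(&storage, &ext));

    // A diverging (or missing) entry fails validation.
    ext.insert("validator-b".to_string(), 31);
    assert!(!powers_match(&storage, &ext));
}
```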
+ pub fn validate_valset_upd_vext_and_get_it_back( + &self, + ext: validator_set_update::SignedVext, + last_height: BlockHeight, + ) -> std::result::Result< + (token::Amount, validator_set_update::SignedVext), + VoteExtensionError, + > { + #[cfg(feature = "abcipp")] + if ext.data.block_height != last_height { + tracing::error!( + ext_height = ?ext.data.block_height, + ?last_height, + "Validator set update vote extension issued for a block \ + height different from the expected last height.", + ); + return Err(VoteExtensionError::UnexpectedBlockHeight); + } + #[cfg(not(feature = "abcipp"))] + if ext.data.block_height > last_height { + tracing::error!( + ext_height = ?ext.data.block_height, + ?last_height, + "Validator set update vote extension issued for a block \ + height higher than the chain's last height.", + ); + return Err(VoteExtensionError::UnexpectedBlockHeight); + } + if last_height.0 == 0 { + tracing::error!("Dropping vote extension issued at genesis"); + return Err(VoteExtensionError::IssuedAtGenesis); + } + // NOTE(not(feature = "abciplus")): for ABCI++, we should pass + // `last_height` here, instead of `ext.data.block_height` + let ext_height_epoch = match self + .storage + .get_epoch(ext.data.block_height) + { + Some(epoch) => epoch, + _ => { + tracing::error!( + block_height = ?ext.data.block_height, + "The epoch of the validator set update vote extension's \ + block height should always be known", + ); + return Err(VoteExtensionError::UnexpectedEpoch); + } + }; + // verify if the new epoch validators' voting powers in storage match + // the voting powers in the vote extension + for (eth_addr_book, namada_addr, namada_power) in self + .storage + .get_active_eth_addresses(Some(ext_height_epoch.next())) + { + let &ext_power = match ext.data.voting_powers.get(ð_addr_book) { + Some(voting_power) => voting_power, + _ => { + tracing::error!( + ?eth_addr_book, + "Could not find expected Ethereum addresses in valset \ + upd vote extension", + ); + return Err( + VoteExtensionError::ValidatorMissingFromExtension, + ); + } + }; + if namada_power != ext_power { + tracing::error!( + validator = %namada_addr, + expected = ?namada_power, + got = ?ext_power, + "Found unexpected voting power value in valset upd vote extension", + ); + return Err(VoteExtensionError::DivergesFromStorage); + } + } + // get the public key associated with this validator + let validator = &ext.data.validator_addr; + let (voting_power, _) = self + .storage + .get_validator_from_address(validator, Some(ext_height_epoch)) + .map_err(|err| { + tracing::error!( + ?err, + %validator, + "Could not get public key from Storage for some validator, \ + while validating valset upd vote extension" + ); + VoteExtensionError::PubKeyNotInStorage + })?; + let epoched_pk = self + .storage + .read_validator_eth_hot_key(validator) + .expect("We should have this hot key in storage"); + let pk = epoched_pk + .get(ext_height_epoch) + .expect("We should have the hot key of the given epoch"); + // verify the signature of the vote extension + ext.verify(pk) + .map_err(|err| { + tracing::error!( + ?err, + ?ext.sig, + ?pk, + %validator, + "Failed to verify the signature of a valset upd vote \ + extension issued by some validator" + ); + VoteExtensionError::VerifySigFailed + }) + .map(|_| (voting_power, ext)) + } + + /// Takes an iterator over validator set update vote extension instances, + /// and returns another iterator. 
The latter yields + /// valid validator set update vote extensions, or the reason why these + /// are invalid, in the form of a [`VoteExtensionError`]. + #[inline] + pub fn validate_valset_upd_vext_list( + &self, + vote_extensions: impl IntoIterator + + 'static, + ) -> impl Iterator< + Item = std::result::Result< + (token::Amount, validator_set_update::SignedVext), + VoteExtensionError, + >, + > + '_ { + vote_extensions.into_iter().map(|vote_extension| { + self.validate_valset_upd_vext_and_get_it_back( + vote_extension, + self.storage.last_height, + ) + }) + } + + /// Takes a list of signed validator set update vote extensions, + /// and filters out invalid instances. + #[inline] + pub fn filter_invalid_valset_upd_vexts( + &self, + vote_extensions: impl IntoIterator + + 'static, + ) -> impl Iterator + '_ + { + self.validate_valset_upd_vext_list(vote_extensions) + .filter_map(|ext| ext.ok()) + } + + /// Compresses a set of signed validator set update vote extensions into a + /// single [`validator_set_update::VextDigest`], whilst filtering + /// invalid [`validator_set_update::SignedVext`] instances in the + /// process. + pub fn compress_valset_updates( + &self, + vote_extensions: Vec, + ) -> Option { + #[cfg(not(feature = "abcipp"))] + if self.storage.last_height == BlockHeight(0) { + return None; + } + + #[cfg(feature = "abcipp")] + let vexts_epoch = + self.storage.get_epoch(self.storage.last_height).expect( + "The epoch of the last block height should always be known", + ); + + #[cfg(feature = "abcipp")] + let total_voting_power = + u64::from(self.storage.get_total_voting_power(Some(vexts_epoch))); + #[cfg(feature = "abcipp")] + let mut voting_power = FractionalVotingPower::default(); + + let mut voting_powers = None; + let mut signatures = HashMap::new(); + + for (_validator_voting_power, mut vote_extension) in + self.filter_invalid_valset_upd_vexts(vote_extensions) + { + if voting_powers.is_none() { + voting_powers = Some(std::mem::take( + &mut vote_extension.data.voting_powers, + )); + } + + let validator_addr = vote_extension.data.validator_addr; + let block_height = vote_extension.data.block_height; + + // update voting power + #[cfg(feature = "abcipp")] + { + let validator_voting_power = u64::from(_validator_voting_power); + voting_power += FractionalVotingPower::new( + validator_voting_power, + total_voting_power, + ) + .expect( + "The voting power we obtain from storage should always be \ + valid", + ); + } + + // register the signature of `validator_addr` + let addr = validator_addr.clone(); + let sig = vote_extension.sig; + + let key = (addr, block_height); + tracing::debug!( + ?key, + ?sig, + ?validator_addr, + "Inserting signature into validator_set_update::VextDigest" + ); + if let Some(existing_sig) = signatures.insert(key, sig.clone()) { + tracing::warn!( + ?sig, + ?existing_sig, + ?validator_addr, + "Overwrote old signature from validator while \ + constructing validator_set_update::VextDigest - maybe \ + private key of validator is being used by multiple nodes?" 
+                );
+            }
+        }
+
+        #[cfg(feature = "abcipp")]
+        if voting_power <= FractionalVotingPower::TWO_THIRDS {
+            tracing::error!(
+                "Tendermint has decided on a block including validator set \
+                 update vote extensions reflecting <= 2/3 of the total stake"
+            );
+            return None;
+        }
+
+        #[cfg(feature = "abcipp")]
+        let voting_powers = voting_powers.expect(
+            "We have enough voting power, so at least one validator set \
+             update vote extension must have been validated.",
+        );
+
+        #[cfg(not(feature = "abcipp"))]
+        let voting_powers = voting_powers.unwrap_or_default();
+
+        Some(validator_set_update::VextDigest {
+            signatures,
+            voting_powers,
+        })
+    }
+}
+
+#[cfg(test)]
+mod test_vote_extensions {
+    use std::default::Default;
+
+    #[cfg(feature = "abcipp")]
+    use borsh::BorshSerialize;
+    use namada::ledger::pos;
+    use namada::ledger::pos::namada_proof_of_stake::PosBase;
+    use namada::ledger::pos::PosQueries;
+    use namada::types::key::RefTo;
+    #[cfg(feature = "abcipp")]
+    use namada::types::vote_extensions::ethereum_events;
+    use namada::types::vote_extensions::validator_set_update;
+    #[cfg(feature = "abcipp")]
+    use namada::types::vote_extensions::VoteExtension;
+
+    #[cfg(feature = "abcipp")]
+    use crate::facade::tendermint_proto::abci::response_verify_vote_extension::VerifyStatus;
+    #[cfg(feature = "abcipp")]
+    use crate::facade::tower_abci::request;
+    use crate::node::ledger::shell::test_utils;
+    use crate::node::ledger::shims::abcipp_shim_types::shim::request::FinalizeBlock;
+    use crate::wallet;
+
+    /// Test if a [`validator_set_update::Vext`] that incorrectly labels what
+    /// block height it was included on in a vote extension is rejected
+    #[test]
+    fn test_reject_incorrect_block_height() {
+        let (shell, _recv, _) = test_utils::setup();
+        let validator_addr =
+            shell.mode.get_validator_address().unwrap().clone();
+
+        let eth_bridge_key =
+            shell.mode.get_eth_bridge_keypair().expect("Test failed");
+
+        let voting_powers = {
+            let next_epoch = shell.storage.get_current_epoch().0.next();
+            shell
+                .storage
+                .get_active_eth_addresses(Some(next_epoch))
+                .map(|(eth_addr_book, _, voting_power)| {
+                    (eth_addr_book, voting_power)
+                })
+                .collect()
+        };
+        #[allow(clippy::redundant_clone)]
+        let validator_set_update = Some(
+            validator_set_update::Vext {
+                voting_powers,
+                validator_addr: validator_addr.clone(),
+                // invalid height
+                block_height: shell.storage.get_current_decision_height() + 1,
+            }
+            .sign(eth_bridge_key),
+        );
+        #[cfg(feature = "abcipp")]
+        {
+            let protocol_key =
+                shell.mode.get_protocol_key().expect("Test failed");
+            let ethereum_events = ethereum_events::Vext::empty(
+                shell.storage.get_current_decision_height(),
+                validator_addr,
+            )
+            .sign(protocol_key);
+            let req = request::VerifyVoteExtension {
+                vote_extension: VoteExtension {
+                    ethereum_events,
+                    validator_set_update,
+                }
+                .try_to_vec()
+                .expect("Test failed"),
+                ..Default::default()
+            };
+
+            assert_eq!(
+                shell.verify_vote_extension(req).status,
+                i32::from(VerifyStatus::Reject)
+            );
+        }
+        #[cfg(not(feature = "abcipp"))]
+        {
+            assert!(!shell.validate_valset_upd_vext(
+                validator_set_update.unwrap(),
+                shell.storage.get_current_decision_height()
+            ))
+        }
+    }
+
+    /// Test that validator set update vote extensions signed by
+    /// a non-validator are rejected
+    #[test]
+    fn test_valset_upd_must_be_signed_by_validator() {
+        let (shell, _recv, _) = test_utils::setup();
+        let (eth_bridge_key, _protocol_key, validator_addr) = {
+            let bertha_key = wallet::defaults::bertha_keypair();
+            let bertha_addr = wallet::defaults::bertha_address();
+            (test_utils::gen_secp256k1_keypair(), bertha_key, bertha_addr)
+        };
+        let voting_powers = {
+            let next_epoch = shell.storage.get_current_epoch().0.next();
+            shell
+                .storage
+                .get_active_eth_addresses(Some(next_epoch))
+                .map(|(eth_addr_book, _, voting_power)| {
+                    (eth_addr_book, voting_power)
+                })
+                .collect()
+        };
+        #[allow(clippy::redundant_clone)]
+        let validator_set_update = Some(
+            validator_set_update::Vext {
+                voting_powers,
+                block_height: shell.storage.get_current_decision_height(),
+                validator_addr: validator_addr.clone(),
+            }
+            .sign(&eth_bridge_key),
+        );
+        #[cfg(feature = "abcipp")]
+        {
+            let ethereum_events = ethereum_events::Vext::empty(
+                shell.storage.get_current_decision_height(),
+                validator_addr,
+            )
+            .sign(&_protocol_key);
+            let req = request::VerifyVoteExtension {
+                vote_extension: VoteExtension {
+                    ethereum_events,
+                    validator_set_update,
+                }
+                .try_to_vec()
+                .expect("Test failed"),
+                ..Default::default()
+            };
+            assert_eq!(
+                shell.verify_vote_extension(req).status,
+                i32::from(VerifyStatus::Reject)
+            );
+        }
+        #[cfg(not(feature = "abcipp"))]
+        assert!(!shell.validate_valset_upd_vext(
+            validator_set_update.unwrap(),
+            shell.storage.get_current_decision_height()
+        ));
+    }
+
+    /// Test the validation of a validator set update emitted for
+    /// some epoch `E`. The test should pass even if the epoch
+    /// changed to some epoch `E': E' > E`, resulting in a
+    /// change to the validator set.
+    #[test]
+    fn test_validate_valset_upd_vexts() {
+        let (mut shell, _recv, _) = test_utils::setup();
+        let protocol_key =
+            shell.mode.get_protocol_key().expect("Test failed").clone();
+        let eth_bridge_key = shell
+            .mode
+            .get_eth_bridge_keypair()
+            .expect("Test failed")
+            .clone();
+        let validator_addr = shell
+            .mode
+            .get_validator_address()
+            .expect("Test failed")
+            .clone();
+        let signed_height = shell.storage.get_current_decision_height();
+        let voting_powers = {
+            let next_epoch = shell.storage.get_current_epoch().0.next();
+            shell
+                .storage
+                .get_active_eth_addresses(Some(next_epoch))
+                .map(|(eth_addr_book, _, voting_power)| {
+                    (eth_addr_book, voting_power)
+                })
+                .collect()
+        };
+        let vote_ext = validator_set_update::Vext {
+            voting_powers,
+            block_height: signed_height,
+            validator_addr,
+        }
+        .sign(&eth_bridge_key);
+
+        // validators from the current epoch sign over validator
+        // set of the next epoch
+        assert_eq!(shell.storage.get_current_epoch().0.0, 0);
+
+        // remove all validators of the next epoch
+        let mut current_validators = shell.storage.read_validator_set();
+        current_validators.data.insert(
+            1,
+            Some(pos::types::ValidatorSet {
+                active: Default::default(),
+                inactive: Default::default(),
+            }),
+        );
+        shell.storage.write_validator_set(&current_validators);
+        // we advance forward to the next epoch
+        let mut req = FinalizeBlock::default();
+        req.header.time = namada::types::time::DateTimeUtc::now();
+        shell.storage.last_height =
+            shell.storage.get_current_decision_height() + 11;
+        shell.finalize_block(req).expect("Test failed");
+        shell.commit();
+        assert_eq!(shell.storage.get_current_epoch().0.0, 1);
+        assert!(
+            shell
+                .storage
+                .get_validator_from_protocol_pk(&protocol_key.ref_to(), None)
+                .is_err()
+        );
+        let prev_epoch = shell.storage.get_current_epoch().0 - 1;
+        assert!(
+            shell
+                .shell
+                .storage
+                .get_validator_from_protocol_pk(
+                    &protocol_key.ref_to(),
+                    Some(prev_epoch)
+                )
+                .is_ok()
+        );
+
+        assert!(shell.validate_valset_upd_vext(vote_ext, signed_height));
+    }
+
+    /// Test if a [`validator_set_update::Vext`] with an incorrect signature
+    /// is rejected
+    #[test]
+    fn test_reject_bad_signatures() {
+        let (shell, _recv, _) = test_utils::setup();
+        let validator_addr =
+            shell.mode.get_validator_address().unwrap().clone();
+
+        let eth_bridge_key =
+            shell.mode.get_eth_bridge_keypair().expect("Test failed");
+
+        #[allow(clippy::redundant_clone)]
+        let validator_set_update = {
+            let voting_powers = {
+                let next_epoch = shell.storage.get_current_epoch().0.next();
+                shell
+                    .storage
+                    .get_active_eth_addresses(Some(next_epoch))
+                    .map(|(eth_addr_book, _, voting_power)| {
+                        (eth_addr_book, voting_power)
+                    })
+                    .collect()
+            };
+            let mut ext = validator_set_update::Vext {
+                voting_powers,
+                block_height: shell.storage.get_current_decision_height(),
+                validator_addr: validator_addr.clone(),
+            }
+            .sign(eth_bridge_key);
+            ext.sig = test_utils::invalidate_signature(ext.sig);
+            Some(ext)
+        };
+        #[cfg(feature = "abcipp")]
+        {
+            let protocol_key =
+                shell.mode.get_protocol_key().expect("Test failed");
+            let ethereum_events = ethereum_events::Vext::empty(
+                shell.storage.get_current_decision_height(),
+                validator_addr,
+            )
+            .sign(protocol_key);
+            let req = request::VerifyVoteExtension {
+                vote_extension: VoteExtension {
+                    ethereum_events,
+                    validator_set_update: validator_set_update.clone(),
+                }
+                .try_to_vec()
+                .expect("Test failed"),
+                ..Default::default()
+            };
+            assert_eq!(
+                shell.verify_vote_extension(req).status,
+                i32::from(VerifyStatus::Reject)
+            );
+        }
+        assert!(!shell.validate_valset_upd_vext(
+            validator_set_update.unwrap(),
+            shell.storage.get_current_decision_height()
+        ));
+    }
+
+    /// Test if a [`validator_set_update::Vext`] is signed with a secp key
+    /// that belongs to an active validator of some previous epoch
+    #[test]
+    #[ignore]
+    fn test_secp_key_belongs_to_active_validator() {
+        // TODO: we need to prove ownership of validator keys
+        // https://github.com/anoma/namada/issues/106
+    }
+}
diff --git a/apps/src/lib/node/ledger/shims/abcipp_shim.rs b/apps/src/lib/node/ledger/shims/abcipp_shim.rs
index 74a56a4ddc..1a098d970b 100644
--- a/apps/src/lib/node/ledger/shims/abcipp_shim.rs
+++ b/apps/src/lib/node/ledger/shims/abcipp_shim.rs
@@ -6,16 +6,16 @@ use std::task::{Context, Poll};
 
 use futures::future::FutureExt;
 use namada::types::address::Address;
+use namada::types::ethereum_events::EthereumEvent;
 #[cfg(not(feature = "abcipp"))]
 use namada::types::hash::Hash;
 #[cfg(not(feature = "abcipp"))]
 use namada::types::storage::BlockHash;
 #[cfg(not(feature = "abcipp"))]
 use namada::types::transaction::hash_tx;
-use tokio::sync::mpsc::UnboundedSender;
+use tokio::sync::mpsc::{Receiver, UnboundedSender};
 use tower::Service;
 
-use super::super::Shell;
 use super::abcipp_shim_types::shim::request::{FinalizeBlock, ProcessedTx};
 #[cfg(not(feature = "abcipp"))]
 use super::abcipp_shim_types::shim::TxBytes;
@@ -24,6 +24,7 @@ use crate::config;
 #[cfg(not(feature = "abcipp"))]
 use crate::facade::tendermint_proto::abci::RequestBeginBlock;
 use crate::facade::tower_abci::{BoxError, Request as Req, Response as Resp};
+use crate::node::ledger::shell::Shell;
 
 /// The shim wraps the shell, which implements ABCI++.
 /// The shim makes a crude translation between the ABCI interface currently used
@@ -44,10 +45,12 @@ pub struct AbcippShim {
 
 impl AbcippShim {
     /// Create a shell with an ABCI service that passes messages to and from the
    /// shell.
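    // A rough sketch of the `!abcipp` translation performed further down in
    // this file: txs arriving via `DeliverTx` are only buffered in
    // `delivered_txs`; on `EndBlock` the whole batch is re-validated with
    // `check_proposal` and drained (via `std::mem::swap`) into a synthesized
    // `FinalizeBlock` request for the shell. Illustrative pseudocode only,
    // not the real request-handling code:
    //
    //     Req::DeliverTx(tx) => self.delivered_txs.push(tx.tx),
    //     Req::EndBlock(_) => {
    //         let (results, _) = self.service.check_proposal(&self.delivered_txs);
    //         // pair each buffered tx with its result and finalize the block
    //     }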
+ #[allow(clippy::too_many_arguments)] pub fn new( config: config::Ledger, wasm_dir: PathBuf, broadcast_sender: UnboundedSender>, + eth_receiver: Option>, db_cache: &rocksdb::Cache, vp_wasm_compilation_cache: u64, tx_wasm_compilation_cache: u64, @@ -62,6 +65,7 @@ impl AbcippShim { config, wasm_dir, broadcast_sender, + eth_receiver, Some(db_cache), vp_wasm_compilation_cache, tx_wasm_compilation_cache, @@ -103,8 +107,8 @@ impl AbcippShim { #[cfg(feature = "abcipp")] Req::FinalizeBlock(block) => { let unprocessed_txs = block.txs.clone(); - let processing_results = - self.service.process_txs(&block.txs); + let (processing_results, _) = + self.service.check_proposal(&block.txs); let mut txs = Vec::with_capacity(unprocessed_txs.len()); for (result, tx) in processing_results .into_iter() @@ -137,8 +141,8 @@ impl AbcippShim { } #[cfg(not(feature = "abcipp"))] Req::EndBlock(_) => { - let processing_results = - self.service.process_txs(&self.delivered_txs); + let (processing_results, _) = + self.service.check_proposal(&self.delivered_txs); let mut txs = Vec::with_capacity(self.delivered_txs.len()); let mut delivered = vec![]; std::mem::swap(&mut self.delivered_txs, &mut delivered); diff --git a/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs b/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs index cb3145f0e2..4e6b83dbbf 100644 --- a/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs +++ b/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs @@ -1,37 +1,27 @@ -#[cfg(not(feature = "abcipp"))] -use tower_abci::{Request, Response}; -#[cfg(feature = "abcipp")] -use tower_abci_abcipp::{Request, Response}; +use crate::facade::tower_abci::{Request, Response}; pub mod shim { use std::convert::TryFrom; + use thiserror::Error; + + use super::{Request as Req, Response as Resp}; #[cfg(not(feature = "abcipp"))] - use tendermint_proto::abci::{ + use crate::facade::tendermint_proto::abci::ResponseEndBlock; + use crate::facade::tendermint_proto::abci::{ RequestApplySnapshotChunk, RequestCheckTx, RequestCommit, RequestEcho, RequestFlush, RequestInfo, RequestInitChain, RequestListSnapshots, RequestLoadSnapshotChunk, RequestOfferSnapshot, RequestPrepareProposal, RequestProcessProposal, RequestQuery, ResponseApplySnapshotChunk, - ResponseCheckTx, ResponseCommit, ResponseEcho, ResponseEndBlock, - ResponseFlush, ResponseInfo, ResponseInitChain, ResponseListSnapshots, - ResponseLoadSnapshotChunk, ResponseOfferSnapshot, - ResponsePrepareProposal, ResponseQuery, + ResponseCheckTx, ResponseCommit, ResponseEcho, ResponseFlush, + ResponseInfo, ResponseInitChain, ResponseListSnapshots, + ResponseLoadSnapshotChunk, ResponseOfferSnapshot, ResponseQuery, }; #[cfg(feature = "abcipp")] - use tendermint_proto_abcipp::abci::{ - RequestApplySnapshotChunk, RequestCheckTx, RequestCommit, RequestEcho, - RequestExtendVote, RequestFlush, RequestInfo, RequestInitChain, - RequestListSnapshots, RequestLoadSnapshotChunk, RequestOfferSnapshot, - RequestPrepareProposal, RequestProcessProposal, RequestQuery, - RequestVerifyVoteExtension, ResponseApplySnapshotChunk, - ResponseCheckTx, ResponseCommit, ResponseEcho, ResponseExtendVote, - ResponseFlush, ResponseInfo, ResponseInitChain, ResponseListSnapshots, - ResponseLoadSnapshotChunk, ResponseOfferSnapshot, - ResponsePrepareProposal, ResponseQuery, ResponseVerifyVoteExtension, + use crate::facade::tendermint_proto::abci::{ + RequestExtendVote, RequestVerifyVoteExtension, ResponseExtendVote, + ResponseVerifyVoteExtension, }; - use thiserror::Error; - - use super::{Request as Req, Response as 
Resp}; use crate::node::ledger::shell; pub type TxBytes = Vec; @@ -129,7 +119,7 @@ pub mod shim { InitChain(ResponseInitChain), Info(ResponseInfo), Query(ResponseQuery), - PrepareProposal(ResponsePrepareProposal), + PrepareProposal(response::PrepareProposal), VerifyHeader(response::VerifyHeader), ProcessProposal(response::ProcessProposal), RevertProposal(response::RevertProposal), @@ -176,7 +166,7 @@ pub mod shim { Ok(Resp::ApplySnapshotChunk(inner)) } Response::PrepareProposal(inner) => { - Ok(Resp::PrepareProposal(inner)) + Ok(Resp::PrepareProposal(inner.into())) } #[cfg(feature = "abcipp")] Response::ExtendVote(inner) => Ok(Resp::ExtendVote(inner)), @@ -272,7 +262,8 @@ pub mod shim { use namada::ledger::events::EventLevel; use crate::facade::tendermint_proto::abci::{ - Event as TmEvent, ResponseProcessProposal, ValidatorUpdate, + Event as TmEvent, ResponsePrepareProposal, ResponseProcessProposal, + ValidatorUpdate, }; #[cfg(not(feature = "abcipp"))] use crate::facade::tendermint_proto::types::ConsensusParams; @@ -282,6 +273,26 @@ pub mod shim { types::ConsensusParams, }; + #[derive(Debug, Default)] + pub struct PrepareProposal { + pub txs: Vec, + } + + #[cfg(feature = "abcipp")] + impl From for ResponsePrepareProposal { + fn from(_: PrepareProposal) -> Self { + // TODO(namada#198): When abci++ arrives, we should return a + // real response. + Self::default() + } + } + + #[cfg(not(feature = "abcipp"))] + impl From for ResponsePrepareProposal { + fn from(resp: PrepareProposal) -> Self { + Self { txs: resp.txs } + } + } #[derive(Debug, Default)] pub struct VerifyHeader; diff --git a/apps/src/lib/node/ledger/storage/mod.rs b/apps/src/lib/node/ledger/storage/mod.rs index f24cffa6a3..4a275a15bf 100644 --- a/apps/src/lib/node/ledger/storage/mod.rs +++ b/apps/src/lib/node/ledger/storage/mod.rs @@ -9,7 +9,8 @@ use arse_merkle_tree::blake2b::Blake2bHasher; use arse_merkle_tree::traits::Hasher; use arse_merkle_tree::H256; use blake2b_rs::{Blake2b, Blake2bBuilder}; -use namada::ledger::storage::{Storage, StorageHasher}; +use namada::ledger::storage::traits::StorageHasher; +use namada::ledger::storage::Storage; #[derive(Default)] pub struct PersistentStorageHasher(Blake2bHasher); diff --git a/apps/src/lib/node/ledger/storage/rocksdb.rs b/apps/src/lib/node/ledger/storage/rocksdb.rs index 86980813fc..e0c7581ea0 100644 --- a/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -1053,7 +1053,8 @@ mod imp { #[cfg(test)] mod test { - use namada::ledger::storage::{MerkleTree, Sha256Hasher}; + use namada::ledger::storage::traits::Sha256Hasher; + use namada::ledger::storage::MerkleTree; use namada::types::address::EstablishedAddressGen; use namada::types::storage::{BlockHash, Epoch, Epochs}; use tempfile::tempdir; diff --git a/apps/src/lib/node/ledger/tendermint_node.rs b/apps/src/lib/node/ledger/tendermint_node.rs index 748dd2b0c9..914c68ee19 100644 --- a/apps/src/lib/node/ledger/tendermint_node.rs +++ b/apps/src/lib/node/ledger/tendermint_node.rs @@ -4,9 +4,14 @@ use std::process::Stdio; use std::str::FromStr; use borsh::BorshSerialize; +use eyre::{eyre, Context}; use namada::types::chain::ChainId; -use namada::types::key::*; +use namada::types::key::{ + common, ed25519, secp256k1, tm_consensus_key_raw_hash, ParseSecretKeyError, + RefTo, SecretKey, +}; use namada::types::time::DateTimeUtc; +use semver::{Version, VersionReq}; use serde_json::json; use thiserror::Error; use tokio::fs::{self, File, OpenOptions}; @@ -14,7 +19,7 @@ use tokio::io::{AsyncReadExt, 
AsyncWriteExt}; use tokio::process::Command; use crate::config; -use crate::facade::tendermint::Genesis; +use crate::facade::tendermint::{block, Genesis}; use crate::facade::tendermint_config::net::Address as TendermintAddress; use crate::facade::tendermint_config::{ Error as TendermintError, TendermintConfig, @@ -23,6 +28,47 @@ use crate::facade::tendermint_config::{ /// Env. var to output Tendermint log to stdout pub const ENV_VAR_TM_STDOUT: &str = "NAMADA_TM_STDOUT"; +#[cfg(feature = "abciplus")] +pub const VERSION_REQUIREMENTS: &str = ">= 0.37.0-alpha.2, <0.38.0"; +#[cfg(not(feature = "abciplus"))] +// TODO: update from our v0.36-based fork to v0.38 for full ABCI++ +pub const VERSION_REQUIREMENTS: &str = "= 0.1.1-abcipp"; + +/// Return the Tendermint version requirements for this build of Namada +fn version_requirements() -> VersionReq { + VersionReq::parse(VERSION_REQUIREMENTS) + .expect("Unable to parse Tendermint version requirements!") +} + +/// Return the [`Version`] of the Tendermint binary specified at +/// `tendermint_path` +async fn get_version(tendermint_path: &str) -> eyre::Result { + let version = run_version_command(tendermint_path).await?; + parse_version(&version) +} + +/// Runs `tendermint version` and returns the output as a string +async fn run_version_command(tendermint_path: &str) -> eyre::Result { + let output = Command::new(tendermint_path) + .arg("version") + .output() + .await?; + let output = String::from_utf8(output.stdout)?; + Ok(output) +} + +/// Parses the raw output of `tendermint version` (e.g. "v0.37.0-alpha.2\n") +/// into a [`Version`] +fn parse_version(version_cmd_output: &str) -> eyre::Result { + let version_str = version_cmd_output.trim_end().trim_start_matches('v'); + Version::parse(version_str).wrap_err_with(|| { + eyre!( + "Couldn't parse semantic version from Tendermint version string: \ + {version_str}" + ) + }) +} + #[derive(Error, Debug)] pub enum Error { #[error("Failed to initialize Tendermint: {0}")] @@ -73,8 +119,36 @@ pub async fn run( tokio::sync::oneshot::Sender<()>, >, ) -> Result<()> { - let home_dir_string = home_dir.to_string_lossy().to_string(); let tendermint_path = from_env_or_default()?; + + let version_reqs = version_requirements(); + match get_version(&tendermint_path).await { + Ok(version) => { + if version_reqs.matches(&version) { + tracing::info!( + %tendermint_path, + %version, + %version_reqs, + "Running with supported Tendermint version", + ); + } else { + tracing::warn!( + %tendermint_path, + %version, + %version_reqs, + "Running with a Tendermint version which may not be supported - run at your own risk!", + ); + } + } + Err(error) => tracing::warn!( + %tendermint_path, + %version_reqs, + %error, + "Couldn't check if Tendermint version is supported - run at your own risk!", + ), + }; + + let home_dir_string = home_dir.to_string_lossy().to_string(); let mode = config.tendermint_mode.to_str().to_owned(); #[cfg(feature = "dev")] @@ -304,7 +378,6 @@ async fn update_tendermint_config( let path = home_dir.join("config").join("config.toml"); let mut config = TendermintConfig::load_toml_file(&path).map_err(Error::LoadConfig)?; - config.p2p.laddr = TendermintAddress::from_str(&tendermint_config.p2p_address.to_string()) .unwrap(); @@ -348,6 +421,7 @@ async fn update_tendermint_config( .open(path) .await .map_err(Error::OpenWriteConfig)?; + let config_str = toml::to_string(&config).map_err(Error::ConfigSerializeToml)?; file.write_all(config_str.as_bytes()) @@ -380,6 +454,19 @@ async fn write_tm_genesis( genesis.genesis_time = 
genesis_time .try_into() .expect("Couldn't convert DateTimeUtc to Tendermint Time"); + let size = block::Size { + // maximum size of a serialized Tendermint block + // cannot go over 100 MiB + max_bytes: (100 << 20) - 1, /* unsure if we are dealing with an open + * range, so it's better to subtract one, + * here */ + // gas is metered app-side, so we disable it + // at the Tendermint level + max_gas: -1, + }; + #[cfg(not(feature = "abcipp"))] + let size = Some(size); + genesis.consensus_params.block = size; #[cfg(feature = "abcipp")] { genesis.consensus_params.timeout.commit = @@ -403,3 +490,40 @@ async fn write_tm_genesis( .await .expect("Couldn't write the Tendermint genesis file"); } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + /// This is really just a smoke test to make sure the + /// [`VERSION_REQUIREMENTS`] constant is always parseable to a + /// [`VersionReq`] + fn test_version_requirements() { + _ = version_requirements(); + } + + #[test] + fn test_parse_version() { + let version_str = "v0.37.0-alpha.2\n"; + let version = parse_version(version_str).unwrap(); + assert_eq!(version.major, 0); + assert_eq!(version.minor, 37); + assert_eq!(version.patch, 0); + + let version_str = "v0.1.1-abcipp\n"; + let version = parse_version(version_str).unwrap(); + assert_eq!(version.major, 0); + assert_eq!(version.minor, 1); + assert_eq!(version.patch, 1); + + let version_str = "v0.38.1\n"; + let version = parse_version(version_str).unwrap(); + assert_eq!(version.major, 0); + assert_eq!(version.minor, 38); + assert_eq!(version.patch, 1); + + let version_str = "unparseable"; + assert!(parse_version(version_str).is_err()); + } +} diff --git a/apps/src/lib/wallet/defaults.rs b/apps/src/lib/wallet/defaults.rs index b0ae08ac83..df251da589 100644 --- a/apps/src/lib/wallet/defaults.rs +++ b/apps/src/lib/wallet/defaults.rs @@ -20,7 +20,7 @@ pub fn addresses_from_genesis(genesis: GenesisConfig) -> Vec<(Alias, Address)> { ("pos".into(), pos::ADDRESS), ("pos_slash_pool".into(), pos::SLASH_POOL_ADDRESS), ("governance".into(), governance::ADDRESS), - ("eth_bridge".into(), eth_bridge::vp::ADDRESS), + ("eth_bridge".into(), eth_bridge::ADDRESS), ]; // Genesis validators let validator_addresses = @@ -78,13 +78,23 @@ mod dev { use crate::wallet::alias::Alias; - /// Generate a new protocol signing keypair and DKG session keypair - pub fn validator_keys() -> (common::SecretKey, DkgKeypair) { + /// Generate a new protocol signing keypair, eth hot key and DKG session + /// keypair + pub fn validator_keys() -> (common::SecretKey, common::SecretKey, DkgKeypair) + { + // ed25519 bytes let bytes: [u8; 33] = [ 0, 200, 107, 23, 252, 78, 80, 8, 164, 142, 3, 194, 33, 12, 250, 169, 211, 127, 47, 13, 194, 54, 199, 81, 102, 246, 189, 119, 144, 25, 27, 113, 222, ]; + // secp256k1 bytes + let eth_bridge_key_bytes = [ + 1, 117, 93, 118, 129, 202, 67, 51, 62, 202, 196, 130, 244, 5, 44, + 88, 200, 121, 169, 11, 227, 79, 223, 74, 88, 49, 132, 213, 59, 64, + 20, 13, 82, + ]; + // DkgKeypair let dkg_bytes = [ 32, 0, 0, 0, 210, 193, 55, 24, 92, 233, 23, 2, 73, 204, 221, 107, 110, 222, 192, 136, 54, 24, 108, 236, 137, 27, 121, 142, 142, 7, @@ -93,6 +103,8 @@ mod dev { ( BorshDeserialize::deserialize(&mut bytes.as_ref()).unwrap(), + BorshDeserialize::deserialize(&mut eth_bridge_key_bytes.as_ref()) + .unwrap(), BorshDeserialize::deserialize(&mut dkg_bytes.as_ref()).unwrap(), ) } diff --git a/apps/src/lib/wallet/mod.rs b/apps/src/lib/wallet/mod.rs index b79ff6703b..c6a806912f 100644 --- a/apps/src/lib/wallet/mod.rs +++ 
b/apps/src/lib/wallet/mod.rs
@@ -164,27 +164,46 @@ impl Wallet {
     /// we should re-use a keypair already in the wallet
     pub fn gen_validator_keys(
         &mut self,
+        eth_bridge_pk: Option<common::PublicKey>,
         protocol_pk: Option<common::PublicKey>,
-        scheme: SchemeType,
+        protocol_key_scheme: SchemeType,
     ) -> Result<ValidatorKeys, FindKeyError> {
-        let protocol_keypair = protocol_pk.map(|pk| {
-            self.find_key_by_pkh(&PublicKeyHash::from(&pk))
-                .ok()
-                .or_else(|| {
-                    self.store
-                        .validator_data
-                        .take()
-                        .map(|data| data.keys.protocol_keypair)
-                })
-                .ok_or(FindKeyError::KeyNotFound)
-        });
-        match protocol_keypair {
-            Some(Err(err)) => Err(err),
-            other => Ok(Store::gen_validator_keys(
-                other.map(|res| res.unwrap()),
-                scheme,
-            )),
-        }
+        let protocol_keypair = self
+            .find_secret_key(protocol_pk, |data| data.keys.protocol_keypair)?;
+        let eth_bridge_keypair = self
+            .find_secret_key(eth_bridge_pk, |data| {
+                data.keys.eth_bridge_keypair
+            })?;
+        Ok(Store::gen_validator_keys(
+            eth_bridge_keypair,
+            protocol_keypair,
+            protocol_key_scheme,
+        ))
+    }
+
+    /// Find a corresponding [`common::SecretKey`] in [`Store`], for some
+    /// [`common::PublicKey`].
+    ///
+    /// If a key was provided in `maybe_pk`, and it's found in [`Store`], we use
+    /// `extract_key` to retrieve it from [`ValidatorData`].
+    fn find_secret_key<F>(
+        &mut self,
+        maybe_pk: Option<common::PublicKey>,
+        extract_key: F,
+    ) -> Result<Option<common::SecretKey>, FindKeyError>
+    where
+        F: Fn(ValidatorData) -> common::SecretKey,
+    {
+        maybe_pk
+            .map(|pk| {
+                self.find_key_by_pkh(&PublicKeyHash::from(&pk))
+                    .ok()
+                    .or_else(|| {
+                        self.store.validator_data.take().map(extract_key)
+                    })
+                    .ok_or(FindKeyError::KeyNotFound)
+            })
+            .transpose()
     }
 
     /// Add validator data to the store
diff --git a/apps/src/lib/wallet/pre_genesis.rs b/apps/src/lib/wallet/pre_genesis.rs
index d3c5fa14c5..9b7572d8b8 100644
--- a/apps/src/lib/wallet/pre_genesis.rs
+++ b/apps/src/lib/wallet/pre_genesis.rs
@@ -39,6 +39,10 @@ pub struct ValidatorWallet {
     pub account_key: common::SecretKey,
     /// Cryptographic keypair for consensus key
     pub consensus_key: common::SecretKey,
+    /// Cryptographic keypair for eth cold key
+    pub eth_cold_key: common::SecretKey,
+    /// Cryptographic keypair for eth hot key
+    pub eth_hot_key: common::SecretKey,
     /// Cryptographic keypair for Tendermint node key
     pub tendermint_node_key: common::SecretKey,
 }
@@ -51,9 +55,11 @@ pub struct ValidatorStore {
     /// Cryptographic keypair for account key
     pub account_key: wallet::StoredKeypair<common::SecretKey>,
     /// Cryptographic keypair for consensus key
     pub consensus_key: wallet::StoredKeypair<common::SecretKey>,
+    /// Cryptographic keypair for eth cold key
+    pub eth_cold_key: wallet::StoredKeypair<common::SecretKey>,
     /// Cryptographic keypair for Tendermint node key
     pub tendermint_node_key: wallet::StoredKeypair<common::SecretKey>,
-    /// Special validator keys
+    /// Special validator keys. Contains the ETH hot key.
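+    // Note the asymmetry this change introduces (visible in the hunks
+    // below): the eth *cold* key is stored as its own, possibly
+    // password-encrypted, `StoredKeypair`, while the eth *hot* key is not a
+    // separate field - it is read back out of the special validator keys:
+    //
+    //     let eth_cold_key = store.eth_cold_key.get(true, password.clone())?;
+    //     let eth_hot_key = store.validator_keys.eth_bridge_keypair.clone();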
     pub validator_keys: wallet::ValidatorKeys,
 }
 
@@ -113,6 +119,10 @@ impl ValidatorWallet {
             store.account_key.get(true, password.clone())?;
         let consensus_key =
             store.consensus_key.get(true, password.clone())?;
+        let eth_cold_key =
+            store.eth_cold_key.get(true, password.clone())?;
+        let eth_hot_key =
+            store.validator_keys.eth_bridge_keypair.clone();
         let tendermint_node_key =
             store.tendermint_node_key.get(true, password)?;
 
@@ -120,6 +130,8 @@ impl ValidatorWallet {
             store,
             account_key,
             consensus_key,
+            eth_cold_key,
+            eth_hot_key,
             tendermint_node_key,
         })
     }
@@ -140,15 +152,20 @@ impl ValidatorWallet {
             SchemeType::Ed25519,
             &password,
         );
+        let (eth_cold_key, eth_cold_sk) =
+            gen_key_to_store(SchemeType::Secp256k1, &password);
         let (tendermint_node_key, tendermint_node_sk) = gen_key_to_store(
             // Note that TM only allows ed25519 for node IDs
             SchemeType::Ed25519,
             &password,
         );
-        let validator_keys = store::Store::gen_validator_keys(None, scheme);
+        let validator_keys =
+            store::Store::gen_validator_keys(None, None, scheme);
+        let eth_hot_key = validator_keys.eth_bridge_keypair.clone();
         let store = ValidatorStore {
             account_key,
             consensus_key,
+            eth_cold_key,
             tendermint_node_key,
             validator_keys,
         };
@@ -156,6 +173,8 @@ impl ValidatorWallet {
             store,
             account_key: account_sk,
             consensus_key: consensus_sk,
+            eth_cold_key: eth_cold_sk,
+            eth_hot_key,
             tendermint_node_key: tendermint_node_sk,
         }
     }
diff --git a/apps/src/lib/wallet/store.rs b/apps/src/lib/wallet/store.rs
index aa1ae1ca88..6606486f83 100644
--- a/apps/src/lib/wallet/store.rs
+++ b/apps/src/lib/wallet/store.rs
@@ -31,6 +31,8 @@ use crate::config::genesis::genesis_config::GenesisConfig;
 pub struct ValidatorKeys {
     /// Special keypair for signing protocol txs
     pub protocol_keypair: common::SecretKey,
+    /// Special hot keypair for signing Ethereum bridge txs
+    pub eth_bridge_keypair: common::SecretKey,
     /// Special session keypair needed by validators for participating
     /// in the DKG protocol
     pub dkg_keypair: Option<DkgKeypair>,
@@ -375,16 +377,28 @@ impl Store {
     ///
     /// Note that this removes the validator data.
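     // Illustrative call patterns for the new signature below, mirroring the
     // updated `test_toml_roundtrip_*` tests at the bottom of this file:
     // both key arguments are optional and freshly generated when `None`,
     // but a *provided* Ethereum bridge key must be Secp256k1 (anything else
     // hits the `panic!` guard below):
     //
     //     let keys = Store::gen_validator_keys(None, None, SchemeType::Ed25519);
     //     assert!(matches!(
     //         keys.eth_bridge_keypair,
     //         common::SecretKey::Secp256k1(_)
     //     ));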
     pub fn gen_validator_keys(
+        eth_bridge_keypair: Option<common::SecretKey>,
         protocol_keypair: Option<common::SecretKey>,
-        scheme: SchemeType,
+        protocol_keypair_scheme: SchemeType,
     ) -> ValidatorKeys {
+        let eth_bridge_keypair = eth_bridge_keypair
+            .map(|k| {
+                if !matches!(&k, common::SecretKey::Secp256k1(_)) {
+                    panic!(
+                        "Ethereum bridge keys can only be of kind Secp256k1"
+                    );
+                }
+                k
+            })
+            .unwrap_or_else(|| gen_sk(SchemeType::Secp256k1));
         let protocol_keypair =
-            protocol_keypair.unwrap_or_else(|| gen_sk(scheme));
+            protocol_keypair.unwrap_or_else(|| gen_sk(protocol_keypair_scheme));
         let dkg_keypair = ferveo_common::Keypair::<EllipticCurve>::new(
             &mut StdRng::from_entropy(),
         );
         ValidatorKeys {
             protocol_keypair,
+            eth_bridge_keypair,
             dkg_keypair: Some(dkg_keypair.into()),
         }
     }
@@ -764,7 +778,7 @@ mod test_wallet {
     fn test_toml_roundtrip_ed25519() {
         let mut store = Store::new();
         let validator_keys =
-            Store::gen_validator_keys(None, SchemeType::Ed25519);
+            Store::gen_validator_keys(None, None, SchemeType::Ed25519);
         store.add_validator_data(
             Address::decode("atest1v4ehgw36x3prswzxggunzv6pxqmnvdj9xvcyzvpsggeyvs3cg9qnywf589qnwvfsg5erg3fkl09rg5").unwrap(),
             validator_keys
@@ -777,7 +791,7 @@ mod test_wallet {
     fn test_toml_roundtrip_secp256k1() {
         let mut store = Store::new();
         let validator_keys =
-            Store::gen_validator_keys(None, SchemeType::Secp256k1);
+            Store::gen_validator_keys(None, None, SchemeType::Secp256k1);
         store.add_validator_data(
             Address::decode("atest1v4ehgw36x3prswzxggunzv6pxqmnvdj9xvcyzvpsggeyvs3cg9qnywf589qnwvfsg5erg3fkl09rg5").unwrap(),
             validator_keys
diff --git a/core/Cargo.toml b/core/Cargo.toml
index 7754310669..671da71adc 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -4,7 +4,7 @@ edition = "2021"
 license = "GPL-3.0"
 name = "namada_core"
 resolver = "2"
-version = "0.12.0"
+version = "0.12.1"
 
 [features]
 default = []
@@ -65,9 +65,11 @@ chrono = {version = "0.4.22", default-features = false, features = ["clock", "st
 data-encoding = "2.3.2"
 derivative = "2.2.0"
 ed25519-consensus = "1.2.0"
-ferveo = {optional = true, git = "https://github.com/anoma/ferveo"}
-ferveo-common = {git = "https://github.com/anoma/ferveo"}
-tpke = {package = "group-threshold-cryptography", optional = true, git = "https://github.com/anoma/ferveo"}
+ethabi = "17.0.0"
+eyre = "0.6.8"
+ferveo = {optional = true, git = "https://github.com/anoma/ferveo", rev = "9e5e91c954158e7cff45c483fd06cd649a81553f"}
+ferveo-common = {git = "https://github.com/anoma/ferveo", rev = "9e5e91c954158e7cff45c483fd06cd649a81553f"}
+tpke = {package = "group-threshold-cryptography", optional = true, git = "https://github.com/anoma/ferveo", rev = "9e5e91c954158e7cff45c483fd06cd649a81553f"}
 # TODO using the same version of tendermint-rs as we do here.
ibc = {version = "0.14.0", default-features = false, optional = true} ibc-proto = {version = "0.17.1", default-features = false, optional = true} @@ -78,6 +80,7 @@ index-set = {git = "https://github.com/heliaxdev/index-set", tag = "v0.7.1", fea itertools = "0.10.0" libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9", default-features = false, features = ["std", "static-context"]} masp_primitives = { git = "https://github.com/anoma/masp", rev = "bee40fc465f6afbd10558d12fe96eb1742eee45c" } +num-rational = "0.4.1" proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm", optional = true} prost = "0.9.0" prost-types = "0.9.0" @@ -94,6 +97,7 @@ tendermint-proto = {version = "0.23.6", optional = true} tendermint-abcipp = {package = "tendermint", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} thiserror = "1.0.30" +tiny-keccak = {version = "2.0.2", features = ["keccak"]} tracing = "0.1.30" zeroize = {version = "1.5.5", features = ["zeroize_derive"]} diff --git a/core/src/hints.rs b/core/src/hints.rs new file mode 100644 index 0000000000..78d49eeab5 --- /dev/null +++ b/core/src/hints.rs @@ -0,0 +1,44 @@ +//! Compiler hints, to improve the performance of certain operations. + +/// A function that is seldom called. +#[inline] +#[cold] +pub fn cold() {} + +/// A likely path to be taken in an if-expression. +/// +/// # Example +/// +/// ```ignore +/// if likely(frequent_condition()) { +/// // most common path to take +/// } else { +/// // ... +/// } +/// ``` +#[inline] +pub fn likely(b: bool) -> bool { + if !b { + cold() + } + b +} + +/// An unlikely path to be taken in an if-expression. +/// +/// # Example +/// +/// ```ignore +/// if unlikely(rare_condition()) { +/// // ... +/// } else { +/// // most common path to take +/// } +/// ``` +#[inline] +pub fn unlikely(b: bool) -> bool { + if b { + cold() + } + b +} diff --git a/core/src/ledger/eth_bridge/mod.rs b/core/src/ledger/eth_bridge/mod.rs new file mode 100644 index 0000000000..5932c2c8d7 --- /dev/null +++ b/core/src/ledger/eth_bridge/mod.rs @@ -0,0 +1,11 @@ +//! Storage keys for the Ethereum bridge account + +pub mod storage; + +use crate::types::address::{Address, InternalAddress}; + +/// The [`InternalAddress`] of the Ethereum bridge account +pub const INTERNAL_ADDRESS: InternalAddress = InternalAddress::EthBridge; + +/// The [`Address`] of the Ethereum bridge account +pub const ADDRESS: Address = Address::Internal(INTERNAL_ADDRESS); diff --git a/core/src/ledger/eth_bridge/storage/bridge_pool.rs b/core/src/ledger/eth_bridge/storage/bridge_pool.rs new file mode 100644 index 0000000000..526635bdbd --- /dev/null +++ b/core/src/ledger/eth_bridge/storage/bridge_pool.rs @@ -0,0 +1,897 @@ +//! Tools for accessing the storage subspaces of the Ethereum +//! 
bridge pool
+use std::collections::BTreeSet;
+use std::convert::TryInto;
+
+use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
+use ethabi::Token;
+use eyre::eyre;
+
+use crate::types::address::{Address, InternalAddress};
+use crate::types::eth_abi::Encode;
+use crate::types::eth_bridge_pool::PendingTransfer;
+use crate::types::hash::Hash;
+use crate::types::keccak::{keccak_hash, KeccakHash};
+use crate::types::storage::{DbKeySeg, Key, KeySeg};
+
+/// The main address of the Ethereum bridge pool
+pub const BRIDGE_POOL_ADDRESS: Address =
+    Address::Internal(InternalAddress::EthBridgePool);
+/// Sub-segment for getting the latest signed root
+const SIGNED_ROOT_SEG: &str = "signed_root";
+
+#[derive(thiserror::Error, Debug)]
+#[error(transparent)]
+/// Generic error that may be returned by the validity predicate
+pub struct Error(#[from] eyre::Error);
+
+/// Get the storage key for the transfers in the pool
+pub fn get_pending_key(transfer: &PendingTransfer) -> Key {
+    get_key_from_hash(&transfer.keccak256())
+}
+
+/// Get the storage key for the transfers using the hash
+pub fn get_key_from_hash(hash: &KeccakHash) -> Key {
+    Key {
+        segments: vec![
+            DbKeySeg::AddressSeg(BRIDGE_POOL_ADDRESS),
+            hash.to_db_key(),
+        ],
+    }
+}
+
+/// Get the storage key for the root of the Merkle tree
+/// containing the transfers in the pool
+pub fn get_signed_root_key() -> Key {
+    Key {
+        segments: vec![
+            DbKeySeg::AddressSeg(BRIDGE_POOL_ADDRESS),
+            DbKeySeg::StringSeg(SIGNED_ROOT_SEG.into()),
+        ],
+    }
+}
+
+/// Check if a key belongs to the bridge pool's sub-storage
+pub fn is_bridge_pool_key(key: &Key) -> bool {
+    matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &BRIDGE_POOL_ADDRESS)
+}
+
+/// A simple Merkle tree for the Ethereum bridge pool
+///
+/// Note that an empty tree has root [0u8; 32] by definition.
+#[derive(
+    Debug, Default, Clone, BorshSerialize, BorshDeserialize, BorshSchema,
+)]
+pub struct BridgePoolTree {
+    /// Root of the tree
+    root: KeccakHash,
+    /// The underlying storage, containing hashes of [`PendingTransfer`]s.
+    leaves: BTreeSet<KeccakHash>,
+}
+
+impl BridgePoolTree {
+    /// Create a new Merkle tree for the Ethereum bridge pool
+    pub fn new(root: KeccakHash, store: BTreeSet<KeccakHash>) -> Self {
+        Self {
+            root,
+            leaves: store,
+        }
+    }
+
+    /// Parse the key to ensure it is of the correct type.
+    ///
+    /// If it is, it can be converted to a hash.
+    /// Checks if the hash is in the tree.
+    pub fn contains_key(&self, key: &Key) -> Result<bool, Error> {
+        Ok(self.leaves.contains(&Self::parse_key(key)?))
+    }
+
+    /// Update the tree with a new value.
+    ///
+    /// Returns the new root if successful. Will
+    /// return an error if the key is malformed.
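+    // A quick usage sketch of this API (illustrative; it mirrors the unit
+    // tests at the bottom of this file). Keys wrap the keccak256 hash of a
+    // `PendingTransfer`, and the root is recomputed on every change:
+    //
+    //     let mut tree = BridgePoolTree::default();
+    //     let key = Key::from(&transfer); // transfer: PendingTransfer
+    //     let root = tree.insert_key(&key)?; // equals transfer.keccak256()
+    //                                        // while it is the only leaf
+    //     assert!(tree.contains_key(&key)?);
+    //     tree.delete_key(&key)?; // root reverts to the all-zero hash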
+    pub fn insert_key(&mut self, key: &Key) -> Result<Hash, Error> {
+        let hash = Self::parse_key(key)?;
+        _ = self.leaves.insert(hash);
+        self.root = self.compute_root();
+        Ok(self.root().into())
+    }
+
+    /// Delete a key from storage and update the root
+    pub fn delete_key(&mut self, key: &Key) -> Result<(), Error> {
+        let hash = Self::parse_key(key)?;
+        _ = self.leaves.remove(&hash);
+        self.root = self.compute_root();
+        Ok(())
+    }
+
+    /// Compute the root of the merkle tree
+    fn compute_root(&self) -> KeccakHash {
+        let mut hashes: Vec<KeccakHash> = self.leaves.iter().cloned().collect();
+        while hashes.len() > 1 {
+            let mut next_hashes = vec![];
+            for pair in hashes.chunks(2) {
+                let left = pair[0].clone();
+                let right = pair.get(1).cloned().unwrap_or_default();
+                next_hashes.push(hash_pair(left, right));
+            }
+            hashes = next_hashes;
+        }
+
+        if hashes.is_empty() {
+            Default::default()
+        } else {
+            hashes.remove(0)
+        }
+    }
+
+    /// Return the root as a [`KeccakHash`] type.
+    pub fn root(&self) -> KeccakHash {
+        self.root.clone()
+    }
+
+    /// Get a reference to the backing store
+    pub fn store(&self) -> &BTreeSet<KeccakHash> {
+        &self.leaves
+    }
+
+    /// Create a batched membership proof for the provided keys
+    pub fn get_membership_proof(
+        &self,
+        mut values: Vec<PendingTransfer>,
+    ) -> Result<BridgePoolProof, Error> {
+        // sort the values according to their hash values
+        values.sort_by_key(|transfer| transfer.keccak256());
+
+        // get the leaf hashes
+        let leaves: BTreeSet<KeccakHash> =
+            values.iter().map(|v| v.keccak256()).collect();
+        if !leaves.is_subset(&self.leaves) {
+            return Err(eyre!(
+                "Cannot generate proof for values that aren't in the tree"
+            )
+            .into());
+        }
+        let mut proof_hashes = vec![];
+        let mut flags = vec![];
+        let mut hashes: Vec<_> = self
+            .leaves
+            .iter()
+            .cloned()
+            .map(|hash| {
+                if leaves.contains(&hash) {
+                    Node::OnPath(hash)
+                } else {
+                    Node::OffPath(hash)
+                }
+            })
+            .collect();
+
+        while hashes.len() > 1 {
+            let mut next_hashes = vec![];
+
+            for pair in hashes.chunks(2) {
+                let left = pair[0].clone();
+                let right = pair.get(1).cloned().unwrap_or_default();
+                match (left, right) {
+                    (Node::OnPath(left), Node::OnPath(right)) => {
+                        flags.push(true);
+                        next_hashes
+                            .push(Node::OnPath(hash_pair(left.clone(), right)));
+                    }
+                    (Node::OnPath(hash), Node::OffPath(sib)) => {
+                        flags.push(false);
+                        proof_hashes.push(sib.clone());
+                        next_hashes
+                            .push(Node::OnPath(hash_pair(hash.clone(), sib)));
+                    }
+                    (Node::OffPath(sib), Node::OnPath(hash)) => {
+                        flags.push(false);
+                        proof_hashes.push(sib.clone());
+                        next_hashes
+                            .push(Node::OnPath(hash_pair(hash, sib.clone())));
+                    }
+                    (Node::OffPath(left), Node::OffPath(right)) => {
+                        next_hashes.push(Node::OffPath(hash_pair(
+                            left.clone(),
+                            right,
+                        )));
+                    }
+                }
+            }
+            hashes = next_hashes;
+        }
+        // add the root to the proof
+        if flags.is_empty() && proof_hashes.is_empty() && leaves.is_empty() {
+            proof_hashes.push(self.root.clone());
+        }
+
+        Ok(BridgePoolProof {
+            proof: proof_hashes,
+            leaves: values,
+            flags,
+        })
+    }
+
+    /// Parse a db key to see if it is valid for the
+    /// bridge pool.
+    ///
+    /// It should have one string segment which should
+    /// parse into a [Hash]
+    fn parse_key(key: &Key) -> Result<KeccakHash, Error> {
+        if key.segments.len() == 1 {
+            match &key.segments[0] {
+                DbKeySeg::StringSeg(str) => {
+                    str.as_str().try_into().map_err(|_| {
+                        eyre!("Could not parse key segment as a hash").into()
+                    })
+                }
+                _ => Err(eyre!("Bridge pool keys should be strings.").into()),
+            }
+        } else {
+            Err(eyre!(
+                "Key for the bridge pool should have exactly one segment."
+            )
+            .into())
+        }
+    }
+}
+
+/// Concatenate two keccak hashes and hash the result
+#[inline]
+fn hash_pair(left: KeccakHash, right: KeccakHash) -> KeccakHash {
+    if left.0 < right.0 {
+        keccak_hash([left.0, right.0].concat().as_slice())
+    } else {
+        keccak_hash([right.0, left.0].concat().as_slice())
+    }
+}
+
+/// Keeps track of whether a node is on a path from the
+/// root of the merkle tree to one of the leaves
+/// being included in a multi-proof.
+#[derive(Debug, Clone)]
+enum Node {
+    /// Node is on a path from root to leaf in proof
+    OnPath(KeccakHash),
+    /// Node is not on a path from root to leaf in proof
+    OffPath(KeccakHash),
+}
+
+impl Default for Node {
+    fn default() -> Self {
+        Self::OffPath(Default::default())
+    }
+}
+
+/// A multi-leaf membership proof
+pub struct BridgePoolProof {
+    /// The hashes other than the provided leaves
+    pub proof: Vec<KeccakHash>,
+    /// The leaves; must be sorted
+    pub leaves: Vec<PendingTransfer>,
+    /// Flags are used to indicate which consecutive
+    /// pairs of leaves in `leaves` are siblings.
+    pub flags: Vec<bool>,
+}
+
+impl BridgePoolProof {
+    /// Verify a membership proof matches the provided root
+    pub fn verify(&self, root: KeccakHash) -> bool {
+        if self.proof.len() + self.leaves.len() != self.flags.len() + 1 {
+            return false;
+        }
+        if self.flags.is_empty() {
+            return if let Some(leaf) = self.leaves.last() {
+                root == leaf.keccak256()
+            } else {
+                match self.proof.last() {
+                    Some(proof_root) => &root == proof_root,
+                    None => false,
+                }
+            };
+        }
+        let total_hashes = self.flags.len();
+        let leaf_len = self.leaves.len();
+
+        let mut hashes = vec![KeccakHash::default(); self.flags.len()];
+        let mut hash_pos = 0usize;
+        let mut leaf_pos = 0usize;
+        let mut proof_pos = 0usize;
+
+        for i in 0..total_hashes {
+            let left = if leaf_pos < leaf_len {
+                let next = self.leaves[leaf_pos].keccak256();
+                leaf_pos += 1;
+                next
+            } else {
+                let next = hashes[hash_pos].clone();
+                hash_pos += 1;
+                next
+            };
+            let right = if self.flags[i] {
+                if leaf_pos < leaf_len {
+                    let next = self.leaves[leaf_pos].keccak256();
+                    leaf_pos += 1;
+                    next
+                } else {
+                    let next = hashes[hash_pos].clone();
+                    hash_pos += 1;
+                    next
+                }
+            } else {
+                let next = self.proof[proof_pos].clone();
+                proof_pos += 1;
+                next
+            };
+            hashes[i] = hash_pair(left, right);
+        }
+
+        if let Some(computed) = hashes.last() {
+            *computed == root
+        } else {
+            false
+        }
+    }
+}
+
+impl Encode<3> for BridgePoolProof {
+    fn tokenize(&self) -> [Token; 3] {
+        let BridgePoolProof {
+            proof,
+            leaves,
+            flags,
+        } = self;
+        let proof = Token::Array(
+            proof
+                .iter()
+                .map(|hash| Token::FixedBytes(hash.0.to_vec()))
+                .collect(),
+        );
+        let transfers = Token::Array(
+            leaves
+                .iter()
+                .map(|t| Token::FixedArray(t.tokenize().to_vec()))
+                .collect(),
+        );
+        let flags =
+            Token::Array(flags.iter().map(|flag| Token::Bool(*flag)).collect());
+        [proof, transfers, flags]
+    }
+}
+
+#[cfg(test)]
+mod test_bridge_pool_tree {
+
+    use itertools::Itertools;
+    use proptest::prelude::*;
+
+    use super::*;
+    use crate::types::eth_bridge_pool::{GasFee, TransferToEthereum};
+    use crate::types::ethereum_events::EthAddress;
+
+    /// An established user address for testing & development
+    fn bertha_address() -> Address {
+        Address::decode("atest1v4ehgw36xvcyyvejgvenxs34g3zygv3jxqunjd6rxyeyys3sxy6rwvfkx4qnj33hg9qnvse4lsfctw")
+            .expect("The token address decoding shouldn't fail")
+    }
+
+    /// Test that if the tree has a single leaf, its root is the hash
+    /// of that leaf
+    #[test]
+    fn test_update_single_key() {
+        let mut tree = BridgePoolTree::default();
+        assert_eq!(tree.root().0, [0; 32]);
+        let transfer = PendingTransfer {
+            transfer: TransferToEthereum {
+                asset: EthAddress([1; 20]),
+                sender: bertha_address(),
+                recipient: EthAddress([2; 20]),
+                amount: 1.into(),
+                nonce: 42u64.into(),
+            },
+            gas_fee: GasFee {
+                amount: 0.into(),
+                payer: bertha_address(),
+            },
+        };
+        let key = Key::from(&transfer);
+        let root =
+            KeccakHash::from(tree.insert_key(&key).expect("Test failed"));
+        assert_eq!(root, transfer.keccak256());
+    }
+
+    #[test]
+    fn test_two_keys() {
+        let mut tree = BridgePoolTree::default();
+        let mut transfers = vec![];
+        for i in 0..2 {
+            let transfer = PendingTransfer {
+                transfer: TransferToEthereum {
+                    asset: EthAddress([i; 20]),
+                    sender: bertha_address(),
+                    recipient: EthAddress([i + 1; 20]),
+                    amount: (i as u64).into(),
+                    nonce: 42u64.into(),
+                },
+                gas_fee: GasFee {
+                    amount: 0.into(),
+                    payer: bertha_address(),
+                },
+            };
+            let key = Key::from(&transfer);
+            transfers.push(transfer);
+            let _ = tree.insert_key(&key).expect("Test failed");
+        }
+        let expected =
+            hash_pair(transfers[0].keccak256(), transfers[1].keccak256());
+        assert_eq!(tree.root(), expected);
+    }
+
+    /// Three is the first leaf count at which the tree must be
+    /// padded with dummy (default) leaves
+    #[test]
+    fn test_three_leaves() {
+        let mut tree = BridgePoolTree::default();
+        let mut transfers = vec![];
+        for i in 0..3 {
+            let transfer = PendingTransfer {
+                transfer: TransferToEthereum {
+                    asset: EthAddress([i; 20]),
+                    sender: bertha_address(),
+                    recipient: EthAddress([i + 1; 20]),
+                    amount: (i as u64).into(),
+                    nonce: 42u64.into(),
+                },
+                gas_fee: GasFee {
+                    amount: 0.into(),
+                    payer: bertha_address(),
+                },
+            };
+            let key = Key::from(&transfer);
+            transfers.push(transfer);
+            let _ = tree.insert_key(&key).expect("Test failed");
+        }
+        transfers.sort_by_key(|t| t.keccak256());
+        let hashes: BTreeSet<KeccakHash> =
+            transfers.iter().map(|t| t.keccak256()).collect();
+        assert_eq!(hashes, tree.leaves);
+
+        let left_hash =
+            hash_pair(transfers[0].keccak256(), transfers[1].keccak256());
+        let right_hash =
+            hash_pair(transfers[2].keccak256(), Default::default());
+        let expected = hash_pair(left_hash, right_hash);
+        assert_eq!(tree.root(), expected);
+    }
+
+    /// Test removing all keys
+    #[test]
+    fn test_delete_all_keys() {
+        let mut tree = BridgePoolTree::default();
+
+        let transfer = PendingTransfer {
+            transfer: TransferToEthereum {
+                asset: EthAddress([1; 20]),
+                sender: bertha_address(),
+                recipient: EthAddress([2; 20]),
+                amount: 1.into(),
+                nonce: 42u64.into(),
+            },
+            gas_fee: GasFee {
+                amount: 0.into(),
+                payer: bertha_address(),
+            },
+        };
+        let key = Key::from(&transfer);
+        let root =
+            KeccakHash::from(tree.insert_key(&key).expect("Test failed"));
+        assert_eq!(root, transfer.keccak256());
+        tree.delete_key(&key).expect("Test failed");
+        assert_eq!(tree.root().0, [0; 32]);
+    }
+
+    /// Test deleting a key
+    #[test]
+    fn test_delete_key() {
+        let mut tree = BridgePoolTree::default();
+        let mut transfers = vec![];
+        for i in 0..3 {
+            let transfer = PendingTransfer {
+                transfer: TransferToEthereum {
+                    asset: EthAddress([i; 20]),
+                    sender: bertha_address(),
+                    recipient: EthAddress([i + 1; 20]),
+                    amount: (i as u64).into(),
+                    nonce: 42u64.into(),
+                },
+                gas_fee: GasFee {
+                    amount: 0.into(),
+                    payer: bertha_address(),
+                },
+            };
+
+            let key = Key::from(&transfer);
+            transfers.push(transfer);
+            let _ = tree.insert_key(&key).expect("Test failed");
+        }
+        transfers.sort_by_key(|t| t.keccak256());
+        tree.delete_key(&Key::from(&transfers[1]))
+            .expect("Test failed");
+
+        let expected =
hash_pair(transfers[0].keccak256(), transfers[2].keccak256()); + assert_eq!(tree.root(), expected); + } + + /// Test that parse key works correctly + #[test] + fn test_parse_key() { + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([1; 20]), + sender: bertha_address(), + recipient: EthAddress([2; 20]), + amount: 1u64.into(), + nonce: 42u64.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + }; + let expected = transfer.keccak256(); + let key = Key::from(&transfer); + assert_eq!( + BridgePoolTree::parse_key(&key).expect("Test failed"), + expected + ); + } + + /// Test that parsing a key with multiple segments fails + #[test] + fn test_key_multiple_segments() { + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([1; 20]), + sender: bertha_address(), + recipient: EthAddress([2; 20]), + amount: 1u64.into(), + nonce: 42u64.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + }; + let hash = transfer.keccak256().to_string(); + let key = Key { + segments: vec![ + DbKeySeg::AddressSeg(bertha_address()), + DbKeySeg::StringSeg(hash), + ], + }; + assert!(BridgePoolTree::parse_key(&key).is_err()); + } + + /// Test that parsing a key that is not a hash fails + #[test] + fn test_key_not_hash() { + let key = Key { + segments: vec![DbKeySeg::StringSeg("bloop".into())], + }; + assert!(BridgePoolTree::parse_key(&key).is_err()); + } + + /// Test that [`contains_key`] works correctly + #[test] + fn test_contains_key() { + let mut tree = BridgePoolTree::default(); + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([1; 20]), + sender: bertha_address(), + recipient: EthAddress([2; 20]), + amount: 1.into(), + nonce: 42u64.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + }; + tree.insert_key(&Key::from(&transfer)).expect("Test failed"); + assert!( + tree.contains_key(&Key::from(&transfer)) + .expect("Test failed") + ); + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([1; 20]), + sender: bertha_address(), + recipient: EthAddress([0; 20]), + amount: 1u64.into(), + nonce: 42u64.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + }; + assert!( + !tree + .contains_key(&Key::from(&transfer)) + .expect("Test failed") + ); + } + + /// Test that the empty proof works. + #[test] + fn test_empty_proof() { + let tree = BridgePoolTree::default(); + let values = vec![]; + let proof = tree.get_membership_proof(values).expect("Test failed"); + assert!(proof.verify(Default::default())); + } + + /// Test that the proof works for proving the only leaf in the tree + #[test] + fn test_single_leaf() { + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([0; 20]), + sender: bertha_address(), + recipient: EthAddress([0; 20]), + amount: 0.into(), + nonce: 0.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + }; + let mut tree = BridgePoolTree::default(); + let key = Key::from(&transfer); + let _ = tree.insert_key(&key).expect("Test failed"); + let proof = tree + .get_membership_proof(vec![transfer]) + .expect("Test failed"); + assert!(proof.verify(tree.root())); + } + + /// Check proofs for membership of single transfer + /// in a tree with two leaves. 
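+    // A worked note on how `BridgePoolProof::verify` consumes `flags` (see
+    // `get_membership_proof` above): at each fold step, `flags[i] == true`
+    // means the right operand is another proven (or already computed) hash,
+    // i.e. two on-path nodes are siblings, while `false` means the sibling
+    // comes out of `proof`. For a two-leaf tree, as in the test below:
+    //
+    //     proving both leaves {a, b}: proof = [],        flags = [true]
+    //     proving only one leaf:      proof = [sibling], flags = [false]
+    //
+    // and in both cases verification recomputes `hash_pair(..)` of the pair
+    // and compares the result against the root.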
+ #[test] + fn test_one_leaf_of_two_proof() { + let mut tree = BridgePoolTree::default(); + let mut transfers = vec![]; + for i in 0..2 { + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([i; 20]), + sender: bertha_address(), + recipient: EthAddress([i + 1; 20]), + amount: (i as u64).into(), + nonce: 42u64.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + }; + + let key = Key::from(&transfer); + transfers.push(transfer); + let _ = tree.insert_key(&key).expect("Test failed"); + } + let proof = tree + .get_membership_proof(vec![transfers.remove(0)]) + .expect("Test failed"); + assert!(proof.verify(tree.root())); + } + + /// Test that a multiproof works for leaves who are siblings + #[test] + fn test_proof_two_out_of_three_leaves() { + let mut tree = BridgePoolTree::default(); + let mut transfers = vec![]; + for i in 0..3 { + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([i; 20]), + sender: bertha_address(), + recipient: EthAddress([i + 1; 20]), + amount: (i as u64).into(), + nonce: 42u64.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + }; + + let key = Key::from(&transfer); + transfers.push(transfer); + let _ = tree.insert_key(&key).expect("Test failed"); + } + transfers.sort_by_key(|t| t.keccak256()); + let values = vec![transfers[0].clone(), transfers[1].clone()]; + let proof = tree.get_membership_proof(values).expect("Test failed"); + assert!(proof.verify(tree.root())); + } + + /// Test that proving an empty subset of leaves always works + #[test] + fn test_proof_no_leaves() { + let mut tree = BridgePoolTree::default(); + let mut transfers = vec![]; + for i in 0..3 { + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([i; 20]), + sender: bertha_address(), + recipient: EthAddress([i + 1; 20]), + amount: (i as u64).into(), + nonce: 42u64.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + }; + let key = Key::from(&transfer); + transfers.push(transfer); + let _ = tree.insert_key(&key).expect("Test failed"); + } + let values = vec![]; + let proof = tree.get_membership_proof(values).expect("Test failed"); + assert!(proof.verify(tree.root())) + } + + /// Test a proof for all the leaves + #[test] + fn test_proof_all_leaves() { + let mut tree = BridgePoolTree::default(); + let mut transfers = vec![]; + for i in 0..2 { + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([i; 20]), + sender: bertha_address(), + recipient: EthAddress([i + 1; 20]), + amount: (i as u64).into(), + nonce: 42u64.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + }; + let key = Key::from(&transfer); + transfers.push(transfer); + let _ = tree.insert_key(&key).expect("Test failed"); + } + transfers.sort_by_key(|t| t.keccak256()); + let proof = tree.get_membership_proof(transfers).expect("Test failed"); + assert!(proof.verify(tree.root())); + } + + /// Test a proof for all the leaves when the number of leaves is odd + #[test] + fn test_proof_all_leaves_odd() { + let mut tree = BridgePoolTree::default(); + let mut transfers = vec![]; + for i in 0..3 { + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([i; 20]), + sender: bertha_address(), + recipient: EthAddress([i + 1; 20]), + amount: (i as u64).into(), + nonce: 42u64.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + 
+                },
+            };
+            let key = Key::from(&transfer);
+            transfers.push(transfer);
+            let _ = tree.insert_key(&key).expect("Test failed");
+        }
+        transfers.sort_by_key(|t| t.keccak256());
+        let proof = tree.get_membership_proof(transfers).expect("Test failed");
+        assert!(proof.verify(tree.root()));
+    }
+
+    /// Test proofs of large trees
+    #[test]
+    fn test_large_proof() {
+        let mut tree = BridgePoolTree::default();
+        let mut transfers = vec![];
+        for i in 0..5 {
+            let transfer = PendingTransfer {
+                transfer: TransferToEthereum {
+                    asset: EthAddress([i; 20]),
+                    sender: bertha_address(),
+                    recipient: EthAddress([i + 1; 20]),
+                    amount: (i as u64).into(),
+                    nonce: 42u64.into(),
+                },
+                gas_fee: GasFee {
+                    amount: 0.into(),
+                    payer: bertha_address(),
+                },
+            };
+            let key = Key::from(&transfer);
+            transfers.push(transfer);
+            let _ = tree.insert_key(&key).expect("Test failed");
+        }
+        transfers.sort_by_key(|t| t.keccak256());
+        let values: Vec<_> = transfers.iter().step_by(2).cloned().collect();
+        let proof = tree.get_membership_proof(values).expect("Test failed");
+        assert!(proof.verify(tree.root()));
+    }
+
+    /// Create a random set of transfers.
+    fn random_transfers(
+        number: usize,
+    ) -> impl Strategy<Value = Vec<PendingTransfer>> {
+        prop::collection::vec(
+            (prop::array::uniform20(0u8..), prop::num::u64::ANY),
+            0..=number,
+        )
+        .prop_flat_map(|addrs| {
+            Just(
+                addrs
+                    .into_iter()
+                    .map(|(addr, nonce)| PendingTransfer {
+                        transfer: TransferToEthereum {
+                            asset: EthAddress(addr),
+                            sender: bertha_address(),
+                            recipient: EthAddress(addr),
+                            amount: Default::default(),
+                            nonce: nonce.into(),
+                        },
+                        gas_fee: GasFee {
+                            amount: Default::default(),
+                            payer: bertha_address(),
+                        },
+                    })
+                    .dedup()
+                    .collect::<Vec<_>>(),
+            )
+        })
+    }
+
+    prop_compose! {
+        /// Creates a random set of transfers and
+        /// then returns them along with a chosen subset.
+        fn arb_transfers_and_subset()
+        (transfers in random_transfers(50))
+        (
+            transfers in Just(transfers.clone()),
+            to_prove in proptest::sample::subsequence(transfers.clone(), 0..=transfers.len()),
+        )
+        -> (Vec<PendingTransfer>, Vec<PendingTransfer>) {
+            (transfers, to_prove)
+        }
+    }
+
+    proptest! {
+        /// Given a random tree and a subset of leaves,
+        /// verify that the constructed multi-proof correctly
+        /// verifies.
+        #[test]
+        fn test_verify_proof((transfers, mut to_prove) in arb_transfers_and_subset()) {
+            let mut tree = BridgePoolTree::default();
+            for transfer in &transfers {
+                let key = Key::from(transfer);
+                let _ = tree.insert_key(&key).expect("Test failed");
+            }
+
+            to_prove.sort_by_key(|t| t.keccak256());
+            let proof = tree.get_membership_proof(to_prove).expect("Test failed");
+            assert!(proof.verify(tree.root()));
+        }
+    }
+}
diff --git a/core/src/ledger/eth_bridge/storage/mod.rs b/core/src/ledger/eth_bridge/storage/mod.rs
new file mode 100644
index 0000000000..60160710e1
--- /dev/null
+++ b/core/src/ledger/eth_bridge/storage/mod.rs
@@ -0,0 +1,109 @@
+//! Functionality for accessing the storage subspace
+pub mod bridge_pool;
+pub mod wrapped_erc20s;
+
+use super::ADDRESS;
+use crate::types::address::nam;
+use crate::types::storage::{DbKeySeg, Key, KeySeg};
+use crate::types::token::balance_key;
+
+/// Sub-key for storing the minimum confirmations parameter
+pub const MIN_CONFIRMATIONS_SUBKEY: &str = "min_confirmations";
+/// Sub-key for storing the Ethereum address for wNam.
+pub const NATIVE_ERC20_SUBKEY: &str = "native_erc20";
+/// Sub-key for storing the Ethereum address of the bridge contract.
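+// All keys built below live directly under the bridge account's address
+// segment, i.e. they render as "{eth_bridge_address}/{subkey}"; this is
+// exactly what `is_eth_bridge_key` checks (with the escrow balance key,
+// which lives under the NAM token's subspace, special-cased). Illustrative
+// sketch:
+//
+//     assert!(is_eth_bridge_key(&min_confirmations_key()));
+//     assert!(is_eth_bridge_key(&escrow_key()));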
+pub const BRIDGE_CONTRACT_SUBKEY: &str = "bridge_contract_address"; +/// Sub-key for storing the Ethereum address of the governance contract. +pub const GOVERNANCE_CONTRACT_SUBKEY: &str = "governance_contract_address"; + +/// Key prefix for the storage subspace +pub fn prefix() -> Key { + Key::from(ADDRESS.to_db_key()) +} + +/// The key to the escrow of the VP. +pub fn escrow_key() -> Key { + balance_key(&nam(), &ADDRESS) +} + +/// Returns whether a key belongs to this account or not +pub fn is_eth_bridge_key(key: &Key) -> bool { + key == &escrow_key() + || matches!(key.segments.get(0), Some(first_segment) if first_segment == &ADDRESS.to_db_key()) +} + +/// Storage key for the minimum confirmations parameter. +pub fn min_confirmations_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(MIN_CONFIRMATIONS_SUBKEY.into()), + ], + } +} + +/// Storage key for the Ethereum address of wNam. +pub fn native_erc20_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(NATIVE_ERC20_SUBKEY.into()), + ], + } +} + +/// Storage key for the Ethereum address of the bridge contract. +pub fn bridge_contract_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(BRIDGE_CONTRACT_SUBKEY.into()), + ], + } +} + +/// Storage key for the Ethereum address of the governance contract. +pub fn governance_contract_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(GOVERNANCE_CONTRACT_SUBKEY.into()), + ], + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::types::address; + + #[test] + fn test_is_eth_bridge_key_returns_true_for_eth_bridge_address() { + let key = Key::from(super::ADDRESS.to_db_key()); + assert!(is_eth_bridge_key(&key)); + } + + #[test] + fn test_is_eth_bridge_key_returns_true_for_eth_bridge_subkey() { + let key = Key::from(super::ADDRESS.to_db_key()) + .push(&"arbitrary key segment".to_owned()) + .expect("Could not set up test"); + assert!(is_eth_bridge_key(&key)); + } + + #[test] + fn test_is_eth_bridge_key_returns_false_for_different_address() { + let key = + Key::from(address::testing::established_address_1().to_db_key()); + assert!(!is_eth_bridge_key(&key)); + } + + #[test] + fn test_is_eth_bridge_key_returns_false_for_different_address_subkey() { + let key = + Key::from(address::testing::established_address_1().to_db_key()) + .push(&"arbitrary key segment".to_owned()) + .expect("Could not set up test"); + assert!(!is_eth_bridge_key(&key)); + } +} diff --git a/core/src/ledger/eth_bridge/storage/wrapped_erc20s.rs b/core/src/ledger/eth_bridge/storage/wrapped_erc20s.rs new file mode 100644 index 0000000000..53bd8d2c33 --- /dev/null +++ b/core/src/ledger/eth_bridge/storage/wrapped_erc20s.rs @@ -0,0 +1,399 @@ +//! 
Functionality for accessing the multitoken subspace +use std::str::FromStr; + +use eyre::eyre; + +use crate::types::address::Address; +use crate::types::ethereum_events::EthAddress; +use crate::types::storage::{self, DbKeySeg, KeySeg}; + +#[allow(missing_docs)] +pub const MULTITOKEN_KEY_SEGMENT: &str = "ERC20"; + +/// Get the key prefix corresponding to the storage subspace that holds wrapped +/// ERC20 tokens +pub fn prefix() -> storage::Key { + super::prefix() + .push(&MULTITOKEN_KEY_SEGMENT.to_owned()) + .expect("should always be able to construct this key") +} + +const BALANCE_KEY_SEGMENT: &str = "balance"; +const SUPPLY_KEY_SEGMENT: &str = "supply"; + +/// Generator for the keys under which details of an ERC20 token are stored +pub struct Keys { + /// The prefix of keys under which the details for a specific ERC20 token + /// are stored + prefix: storage::Key, +} + +impl Keys { + /// Get the `balance` key for a specific owner - there should be a + /// [`crate::types::token::Amount`] stored here + pub fn balance(&self, owner: &Address) -> storage::Key { + self.prefix + .push(&BALANCE_KEY_SEGMENT.to_owned()) + .expect("should always be able to construct this key") + .push(&owner.to_db_key()) + .expect("should always be able to construct this key") + } + + /// Get the `supply` key - there should be a + /// [`crate::types::token::Amount`] stored here + pub fn supply(&self) -> storage::Key { + self.prefix + .push(&SUPPLY_KEY_SEGMENT.to_owned()) + .expect("should always be able to construct this key") + } +} + +impl From<&EthAddress> for Keys { + fn from(address: &EthAddress) -> Self { + Keys { + prefix: prefix() + .push(&address.to_canonical()) + .expect("should always be able to construct this key"), + } + } +} + +/// Construct a sub-prefix from an ERC20 address. +pub fn sub_prefix(address: &EthAddress) -> storage::Key { + storage::Key::from(MULTITOKEN_KEY_SEGMENT.to_owned().to_db_key()) + .push(&address.to_db_key()) + .expect("should always be able to construct this key") +} + +/// Represents the type of a key relating to a wrapped ERC20 +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone)] +pub enum KeyType { + /// The key holds a wrapped ERC20 balance + Balance { + /// The owner of the balance + owner: Address, + }, + /// A type of key which tracks the total supply of some wrapped ERC20 + Supply, +} + +/// Represents a key relating to a wrapped ERC20 +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone)] +pub struct Key { + /// The specific ERC20 as identified by its Ethereum address + pub asset: EthAddress, + /// The type of this key + pub suffix: KeyType, +} + +impl From<&Key> for storage::Key { + fn from(mt_key: &Key) -> Self { + let keys = Keys::from(&mt_key.asset); + match &mt_key.suffix { + KeyType::Balance { owner } => keys.balance(owner), + KeyType::Supply => keys.supply(), + } + } +} + +fn has_erc20_segment(key: &storage::Key) -> bool { + matches!( + key.segments.get(1), + Some(segment) if segment == &DbKeySeg::StringSeg(MULTITOKEN_KEY_SEGMENT.to_owned()), + ) +} + +impl TryFrom<&storage::Key> for Key { + type Error = eyre::Error; + + fn try_from(key: &storage::Key) -> Result { + if !super::is_eth_bridge_key(key) { + return Err(eyre!("key does not belong to the EthBridge")); + } + if !has_erc20_segment(key) { + return Err(eyre!("key does not have ERC20 segment")); + } + + let asset = + if let Some(DbKeySeg::StringSeg(segment)) = key.segments.get(2) { + EthAddress::from_str(segment)? 
+ } else { + return Err(eyre!( + "key has an incorrect segment at index #2, expected an \ + Ethereum address" + )); + }; + + let segment_3 = + if let Some(DbKeySeg::StringSeg(segment)) = key.segments.get(3) { + segment.to_owned() + } else { + return Err(eyre!( + "key has an incorrect segment at index #3, expected a \ + string segment" + )); + }; + + match segment_3.as_str() { + SUPPLY_KEY_SEGMENT => { + let supply_key = Key { + asset, + suffix: KeyType::Supply, + }; + Ok(supply_key) + } + BALANCE_KEY_SEGMENT => { + let owner = if let Some(DbKeySeg::AddressSeg(address)) = + key.segments.get(4) + { + address.to_owned() + } else { + return Err(eyre!( + "key has an incorrect segment at index #4, expected \ + an address segment" + )); + }; + let balance_key = Key { + asset, + suffix: KeyType::Balance { owner }, + }; + Ok(balance_key) + } + _ => Err(eyre!("key has unrecognized string segment at index #3")), + } + } +} + +#[cfg(test)] +mod test { + use std::result::Result; + use std::str::FromStr; + + use super::*; + use crate::ledger::eth_bridge::ADDRESS; + use crate::types::address::Address; + use crate::types::ethereum_events::testing::{ + DAI_ERC20_ETH_ADDRESS, DAI_ERC20_ETH_ADDRESS_CHECKSUMMED, + }; + use crate::types::storage::DbKeySeg; + + const ARBITRARY_OWNER_ADDRESS: &str = + "atest1d9khqw36x9zyxwfhgfpygv2pgc65gse4gy6rjs34gfzr2v69gy6y23zpggurjv2yx5m52sesu6r4y4"; + + #[test] + fn test_prefix() { + assert_matches!( + &prefix().segments[..], + [ + DbKeySeg::AddressSeg(multitoken_addr), + DbKeySeg::StringSeg(multitoken_path), + ] if multitoken_addr == &ADDRESS && + multitoken_path == MULTITOKEN_KEY_SEGMENT + ) + } + + #[test] + fn test_keys_from_eth_address() { + let keys: Keys = (&DAI_ERC20_ETH_ADDRESS).into(); + assert_matches!( + &keys.prefix.segments[..], + [ + DbKeySeg::AddressSeg(multitoken_addr), + DbKeySeg::StringSeg(multitoken_path), + DbKeySeg::StringSeg(token_id), + ] if multitoken_addr == &ADDRESS && + multitoken_path == MULTITOKEN_KEY_SEGMENT && + token_id == &DAI_ERC20_ETH_ADDRESS_CHECKSUMMED.to_ascii_lowercase() + ) + } + + #[test] + fn test_keys_balance() { + let keys: Keys = (&DAI_ERC20_ETH_ADDRESS).into(); + let key = + keys.balance(&Address::from_str(ARBITRARY_OWNER_ADDRESS).unwrap()); + assert_matches!( + &key.segments[..], + [ + DbKeySeg::AddressSeg(multitoken_addr), + DbKeySeg::StringSeg(multitoken_path), + DbKeySeg::StringSeg(token_id), + DbKeySeg::StringSeg(balance_key_seg), + DbKeySeg::AddressSeg(owner_addr), + ] if multitoken_addr == &ADDRESS && + multitoken_path == MULTITOKEN_KEY_SEGMENT && + token_id == &DAI_ERC20_ETH_ADDRESS_CHECKSUMMED.to_ascii_lowercase() && + balance_key_seg == BALANCE_KEY_SEGMENT && + owner_addr == &Address::decode(ARBITRARY_OWNER_ADDRESS).unwrap() + ) + } + + #[test] + fn test_keys_balance_to_string() { + let keys: Keys = (&DAI_ERC20_ETH_ADDRESS).into(); + let key = + keys.balance(&Address::from_str(ARBITRARY_OWNER_ADDRESS).unwrap()); + assert_eq!( + "#atest1v9hx7w36g42ysgzzwf5kgem9ypqkgerjv4ehxgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpq8f99ew/ERC20/0x6b175474e89094c44da98b954eedeac495271d0f/balance/#atest1d9khqw36x9zyxwfhgfpygv2pgc65gse4gy6rjs34gfzr2v69gy6y23zpggurjv2yx5m52sesu6r4y4", + key.to_string() + ) + } + + #[test] + fn test_keys_supply() { + let keys: Keys = (&DAI_ERC20_ETH_ADDRESS).into(); + let key = keys.supply(); + assert_matches!( + &key.segments[..], + [ + DbKeySeg::AddressSeg(multitoken_addr), + DbKeySeg::StringSeg(multitoken_path), + DbKeySeg::StringSeg(token_id), + DbKeySeg::StringSeg(supply_key_seg), + ] if multitoken_addr == 
&ADDRESS &&
+            multitoken_path == MULTITOKEN_KEY_SEGMENT &&
+            token_id == &DAI_ERC20_ETH_ADDRESS_CHECKSUMMED.to_ascii_lowercase() &&
+            supply_key_seg == SUPPLY_KEY_SEGMENT
+        )
+    }
+
+    #[test]
+    fn test_keys_supply_to_string() {
+        let keys: Keys = (&DAI_ERC20_ETH_ADDRESS).into();
+        let key = keys.supply();
+        assert_eq!(
+            "#atest1v9hx7w36g42ysgzzwf5kgem9ypqkgerjv4ehxgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpq8f99ew/ERC20/0x6b175474e89094c44da98b954eedeac495271d0f/supply",
+            key.to_string(),
+        )
+    }
+
+    #[test]
+    fn test_from_multitoken_key_for_key() {
+        // supply key
+        let wdai_supply = Key {
+            asset: DAI_ERC20_ETH_ADDRESS,
+            suffix: KeyType::Supply,
+        };
+        let key: storage::Key = (&wdai_supply).into();
+        assert_matches!(
+            &key.segments[..],
+            [
+                DbKeySeg::AddressSeg(multitoken_addr),
+                DbKeySeg::StringSeg(multitoken_path),
+                DbKeySeg::StringSeg(token_id),
+                DbKeySeg::StringSeg(supply_key_seg),
+            ] if multitoken_addr == &ADDRESS &&
+            multitoken_path == MULTITOKEN_KEY_SEGMENT &&
+            token_id == &DAI_ERC20_ETH_ADDRESS_CHECKSUMMED.to_ascii_lowercase() &&
+            supply_key_seg == SUPPLY_KEY_SEGMENT
+        );
+
+        // balance key
+        let wdai_balance = Key {
+            asset: DAI_ERC20_ETH_ADDRESS,
+            suffix: KeyType::Balance {
+                owner: Address::from_str(ARBITRARY_OWNER_ADDRESS).unwrap(),
+            },
+        };
+        let key: storage::Key = (&wdai_balance).into();
+        assert_matches!(
+            &key.segments[..],
+            [
+                DbKeySeg::AddressSeg(multitoken_addr),
+                DbKeySeg::StringSeg(multitoken_path),
+                DbKeySeg::StringSeg(token_id),
+                DbKeySeg::StringSeg(balance_key_seg),
+                DbKeySeg::AddressSeg(owner_addr),
+            ] if multitoken_addr == &ADDRESS &&
+            multitoken_path == MULTITOKEN_KEY_SEGMENT &&
+            token_id == &DAI_ERC20_ETH_ADDRESS_CHECKSUMMED.to_ascii_lowercase() &&
+            balance_key_seg == BALANCE_KEY_SEGMENT &&
+            owner_addr == &Address::decode(ARBITRARY_OWNER_ADDRESS).unwrap()
+        );
+    }
+
+    #[test]
+    fn test_try_from_key_for_multitoken_key_supply() {
+        // supply key
+        let key = storage::Key::from_str(&format!(
+            "#{}/ERC20/{}/supply",
+            ADDRESS,
+            DAI_ERC20_ETH_ADDRESS_CHECKSUMMED.to_ascii_lowercase(),
+        ))
+        .expect("Should be able to construct key for test");
+
+        let result: Result<Key, eyre::Error> = Key::try_from(&key);
+
+        let mt_key = match result {
+            Ok(mt_key) => mt_key,
+            Err(error) => {
+                panic!(
+                    "Could not convert key {:?} to MultitokenKey: {:?}",
+                    key, error
+                )
+            }
+        };
+
+        assert_eq!(mt_key.asset, DAI_ERC20_ETH_ADDRESS);
+        assert_eq!(mt_key.suffix, KeyType::Supply);
+    }
+
+    #[test]
+    fn test_try_from_key_for_multitoken_key_balance() {
+        // balance key
+        let key = storage::Key::from_str(&format!(
+            "#{}/ERC20/{}/balance/#{}",
+            ADDRESS,
+            DAI_ERC20_ETH_ADDRESS_CHECKSUMMED.to_ascii_lowercase(),
+            ARBITRARY_OWNER_ADDRESS
+        ))
+        .expect("Should be able to construct key for test");
+
+        let result: Result<Key, eyre::Error> = Key::try_from(&key);
+
+        let mt_key = match result {
+            Ok(mt_key) => mt_key,
+            Err(error) => {
+                panic!(
+                    "Could not convert key {:?} to MultitokenKey: {:?}",
+                    key, error
+                )
+            }
+        };
+
+        assert_eq!(mt_key.asset, DAI_ERC20_ETH_ADDRESS);
+        assert_eq!(
+            mt_key.suffix,
+            KeyType::Balance {
+                owner: Address::from_str(ARBITRARY_OWNER_ADDRESS).unwrap()
+            }
+        );
+    }
+
+    #[test]
+    fn test_has_erc20_segment() {
+        let key = storage::Key::from_str(&format!(
+            "#{}/ERC20/{}/balance/#{}",
+            ADDRESS,
+            DAI_ERC20_ETH_ADDRESS_CHECKSUMMED.to_ascii_lowercase(),
+            ARBITRARY_OWNER_ADDRESS
+        ))
+        .expect("Should be able to construct key for test");
+
+        assert!(has_erc20_segment(&key));
+
+        let key = storage::Key::from_str(&format!(
+            "#{}/ERC20/{}/supply",
+            ADDRESS,
+            DAI_ERC20_ETH_ADDRESS_CHECKSUMMED.to_ascii_lowercase(),
+        ))
+        .expect("Should be able to construct key for test");
+
+        assert!(has_erc20_segment(&key));
+
+        let key = storage::Key::from_str(&format!("#{}/ERC20", ADDRESS))
+            .expect("Should be able to construct key for test");
+
+        assert!(has_erc20_segment(&key));
+    }
+}
diff --git a/core/src/ledger/governance/parameters.rs b/core/src/ledger/governance/parameters.rs
index 71dca8c91b..68c0744ff5 100644
--- a/core/src/ledger/governance/parameters.rs
+++ b/core/src/ledger/governance/parameters.rs
@@ -69,7 +69,7 @@ impl GovParams {
     pub fn init_storage<DB, H>(&self, storage: &mut Storage<DB, H>)
     where
         DB: storage::DB + for<'iter> storage::DBIter<'iter>,
-        H: storage::StorageHasher,
+        H: storage::traits::StorageHasher,
     {
         let Self {
             min_proposal_fund,
diff --git a/core/src/ledger/mod.rs b/core/src/ledger/mod.rs
index 83568c0da7..9a929cccd3 100644
--- a/core/src/ledger/mod.rs
+++ b/core/src/ledger/mod.rs
@@ -1,5 +1,6 @@
 //! The ledger modules
 
+pub mod eth_bridge;
 pub mod gas;
 pub mod governance;
 #[cfg(any(feature = "abciplus", feature = "abcipp"))]
diff --git a/core/src/ledger/parameters/mod.rs b/core/src/ledger/parameters/mod.rs
index cb84bd56e7..fb48f147e3 100644
--- a/core/src/ledger/parameters/mod.rs
+++ b/core/src/ledger/parameters/mod.rs
@@ -9,6 +9,7 @@ use super::storage::types::{decode, encode};
 use super::storage::{types, Storage};
 use crate::ledger::storage::{self as ledger_storage};
 use crate::types::address::{Address, InternalAddress};
+use crate::types::chain::ProposalBytes;
 use crate::types::storage::Key;
 use crate::types::time::DurationSecs;
 
@@ -32,6 +33,8 @@ pub struct Parameters {
     pub epoch_duration: EpochDuration,
     /// Maximum expected time per block (read only)
     pub max_expected_time_per_block: DurationSecs,
+    /// Max payload size, in bytes, for a tx batch proposal.
+    pub max_proposal_bytes: ProposalBytes,
     /// Whitelisted validity predicate hashes (read only)
     pub vp_whitelist: Vec<String>,
     /// Whitelisted tx hashes (read only)
@@ -101,6 +104,7 @@ impl Parameters {
         let Self {
             epoch_duration,
             max_expected_time_per_block,
+            max_proposal_bytes,
             vp_whitelist,
             tx_whitelist,
             implicit_vp,
@@ -111,6 +115,16 @@ impl Parameters {
             pos_inflation_amount,
         } = self;
 
+        // write max proposal bytes parameter
+        let max_proposal_bytes_key = storage::get_max_proposal_bytes_key();
+        let max_proposal_bytes_value = encode(&max_proposal_bytes);
+        storage
+            .write(&max_proposal_bytes_key, max_proposal_bytes_value)
+            .expect(
+                "Max proposal bytes parameter must be initialized in the \
+                 genesis block",
+            );
+
         // write epoch parameters
         let epoch_key = storage::get_epoch_duration_storage_key();
         let epoch_value = encode(epoch_duration);
@@ -373,7 +387,7 @@ where
     Ok((epoch_duration, gas))
 }
 
-// Read the all the parameters from storage. Returns the parameters and gas
+/// Read all the parameters from storage. Returns the parameters and gas
 /// cost.
 pub fn read<DB, H>(
     storage: &Storage<DB, H>,
 ) -> std::result::Result<(Parameters, u64), ReadError>
 where
     DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>,
     H: ledger_storage::StorageHasher,
 {
@@ -382,6 +396,17 @@ where
     DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>,
     H: ledger_storage::StorageHasher,
 {
+    // read max proposal bytes
+    let (max_proposal_bytes, gas_proposal_bytes) = {
+        let key = storage::get_max_proposal_bytes_key();
+        let (value, gas) =
+            storage.read(&key).map_err(ReadError::StorageError)?;
+        let value: ProposalBytes =
+            decode(value.ok_or(ReadError::ParametersMissing)?)
+ .map_err(ReadError::StorageTypeError)?; + (value, gas) + }; + // read epoch duration let (epoch_duration, gas_epoch) = read_epoch_duration_parameter(storage) .expect("Couldn't read epoch duration parameters"); @@ -464,10 +489,31 @@ where decode(value.ok_or(ReadError::ParametersMissing)?) .map_err(ReadError::StorageTypeError)?; + let total_gas_cost = [ + gas_epoch, + gas_tx, + gas_vp, + gas_time, + gas_implicit_vp, + gas_epy, + gas_gain_p, + gas_gain_d, + gas_staked, + gas_reward, + gas_proposal_bytes, + ] + .into_iter() + .fold(0u64, |accum, gas| { + accum + .checked_add(gas) + .expect("u64 overflow occurred while doing gas arithmetic") + }); + Ok(( Parameters { epoch_duration, max_expected_time_per_block, + max_proposal_bytes, vp_whitelist, tx_whitelist, implicit_vp, @@ -477,15 +523,6 @@ where staked_ratio, pos_inflation_amount, }, - gas_epoch - + gas_tx - + gas_vp - + gas_time - + gas_implicit_vp - + gas_epy - + gas_gain_p - + gas_gain_d - + gas_staked - + gas_reward, + total_gas_cost, )) } diff --git a/core/src/ledger/parameters/storage.rs b/core/src/ledger/parameters/storage.rs index b8dc84fd76..ed16bacbcf 100644 --- a/core/src/ledger/parameters/storage.rs +++ b/core/src/ledger/parameters/storage.rs @@ -12,6 +12,7 @@ const POS_GAIN_P_KEY: &str = "pos_gain_p"; const POS_GAIN_D_KEY: &str = "pos_gain_d"; const STAKED_RATIO_KEY: &str = "staked_ratio_key"; const POS_INFLATION_AMOUNT_KEY: &str = "pos_inflation_amount_key"; +const MAX_PROPOSAL_BYTES_KEY: &str = "max_proposal_bytes"; /// Returns if the key is a parameter key. pub fn is_parameter_key(key: &Key) -> bool { @@ -20,10 +21,19 @@ pub fn is_parameter_key(key: &Key) -> bool { /// Returns if the key is a protocol parameter key. pub fn is_protocol_parameter_key(key: &Key) -> bool { + // TODO: improve this code; use some kind of prefix + // tree to efficiently match `key` is_epoch_duration_storage_key(key) || is_max_expected_time_per_block_key(key) || is_tx_whitelist_key(key) || is_vp_whitelist_key(key) + || is_implicit_vp_key(key) + || is_epochs_per_year_key(key) + || is_pos_gain_p_key(key) + || is_pos_gain_d_key(key) + || is_staked_ratio_key(key) + || is_pos_inflation_amount_key(key) + || is_max_proposal_bytes_key(key) } /// Returns if the key is an epoch storage key. @@ -106,6 +116,14 @@ pub fn is_pos_inflation_amount_key(key: &Key) -> bool { ] if addr == &ADDRESS && pos_inflation_amount == POS_INFLATION_AMOUNT_KEY) } +/// Returns if the key is the max proposal bytes key. +pub fn is_max_proposal_bytes_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(max_proposal_bytes), + ] if addr == &ADDRESS && max_proposal_bytes == MAX_PROPOSAL_BYTES_KEY) +} + /// Storage key used for epoch parameter. pub fn get_epoch_duration_storage_key() -> Key { Key { @@ -205,3 +223,13 @@ pub fn get_pos_inflation_amount_key() -> Key { ], } } + +/// Storage key used for the max proposal bytes. 
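+///
+/// A minimal usage sketch (not part of this changeset; `storage` and the
+/// `read`/`decode` helpers are assumed to be the ones used in
+/// `parameters/mod.rs` above):
+///
+/// ```ignore
+/// let key = get_max_proposal_bytes_key();
+/// let (value, _gas) = storage.read(&key).expect("storage read failed");
+/// let max_bytes: ProposalBytes =
+///     decode(value.expect("the parameter must be initialized")).unwrap();
+/// ```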
+pub fn get_max_proposal_bytes_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(MAX_PROPOSAL_BYTES_KEY.to_string()), + ], + } +} diff --git a/core/src/ledger/storage/merkle_tree.rs b/core/src/ledger/storage/merkle_tree.rs index dc65a12540..26b4f53ee7 100644 --- a/core/src/ledger/storage/merkle_tree.rs +++ b/core/src/ledger/storage/merkle_tree.rs @@ -14,13 +14,17 @@ use thiserror::Error; use super::traits::{StorageHasher, SubTreeRead, SubTreeWrite}; use crate::bytes::ByteBuf; -use crate::ledger::storage::ics23_specs::{self, ibc_leaf_spec}; -use crate::ledger::storage::types; +use crate::ledger::eth_bridge::storage::bridge_pool::{ + get_signed_root_key, BridgePoolTree, +}; +use crate::ledger::storage::ics23_specs::ibc_leaf_spec; +use crate::ledger::storage::{ics23_specs, types}; use crate::types::address::{Address, InternalAddress}; use crate::types::hash::Hash; +use crate::types::keccak::KeccakHash; use crate::types::storage::{ - self, DbKeySeg, Error as StorageError, Key, StringKey, TreeBytes, - TreeKeyError, IBC_KEY_LIMIT, + self, DbKeySeg, Error as StorageError, Key, MembershipProof, StringKey, + TreeBytes, TreeKeyError, IBC_KEY_LIMIT, }; #[allow(missing_docs)] @@ -54,10 +58,13 @@ type Result = std::result::Result; /// Type alias for bytes to be put into the Merkle storage pub(super) type StorageBytes<'a> = &'a [u8]; -/// Type aliases for the different merkle trees and backing stores +// Type aliases for the different merkle trees and backing stores +/// Sparse-merkle-tree store pub type SmtStore = DefaultStore; /// Arse-merkle-tree store pub type AmtStore = DefaultStore; +/// Bridge pool store +pub type BridgePoolStore = std::collections::BTreeSet; /// Sparse-merkle-tree pub type Smt = ArseMerkleTree; /// Arse-merkle-tree @@ -85,6 +92,8 @@ pub enum StoreType { Ibc, /// For PoS-related data PoS, + /// For the Ethereum bridge Pool transfers + BridgePool, } /// Backing storage for merkle trees @@ -97,6 +106,8 @@ pub enum Store { Ibc(AmtStore), /// For PoS-related data PoS(SmtStore), + /// For the Ethereum bridge Pool transfers + BridgePool(BridgePoolStore), } impl Store { @@ -107,6 +118,7 @@ impl Store { Self::Account(store) => StoreRef::Account(store), Self::Ibc(store) => StoreRef::Ibc(store), Self::PoS(store) => StoreRef::PoS(store), + Self::BridgePool(store) => StoreRef::BridgePool(store), } } } @@ -121,26 +133,30 @@ pub enum StoreRef<'a> { Ibc(&'a AmtStore), /// For PoS-related data PoS(&'a SmtStore), + /// For the Ethereum bridge Pool transfers + BridgePool(&'a BridgePoolStore), } impl<'a> StoreRef<'a> { - /// Convert to an owned `Store`. + /// Get owned copies of backing stores of our Merkle tree. pub fn to_owned(&self) -> Store { match *self { Self::Base(store) => Store::Base(store.to_owned()), Self::Account(store) => Store::Account(store.to_owned()), Self::Ibc(store) => Store::Ibc(store.to_owned()), Self::PoS(store) => Store::PoS(store.to_owned()), + Self::BridgePool(store) => Store::BridgePool(store.to_owned()), } } - /// Encode a `StoreRef`. + /// Borsh Seriliaze the backing stores of our Merkle tree. 
pub fn encode(&self) -> Vec { match self { Self::Base(store) => store.try_to_vec(), Self::Account(store) => store.try_to_vec(), Self::Ibc(store) => store.try_to_vec(), Self::PoS(store) => store.try_to_vec(), + Self::BridgePool(store) => store.try_to_vec(), } .expect("Serialization failed") } @@ -149,11 +165,12 @@ impl<'a> StoreRef<'a> { impl StoreType { /// Get an iterator for the base tree and subtrees pub fn iter() -> std::slice::Iter<'static, Self> { - static SUB_TREE_TYPES: [StoreType; 4] = [ + static SUB_TREE_TYPES: [StoreType; 5] = [ StoreType::Base, StoreType::Account, StoreType::PoS, StoreType::Ibc, + StoreType::BridgePool, ]; SUB_TREE_TYPES.iter() } @@ -171,6 +188,15 @@ impl StoreType { InternalAddress::Ibc => { Ok((StoreType::Ibc, key.sub_key()?)) } + InternalAddress::EthBridgePool => { + // the root of this sub-tree is kept in accounts + // storage along with a quorum of validator signatures + if *key == get_signed_root_key() { + Ok((StoreType::Account, key.clone())) + } else { + Ok((StoreType::BridgePool, key.sub_key()?)) + } + } // use the same key for Parameters _ => Ok((StoreType::Account, key.clone())), } @@ -199,6 +225,9 @@ impl StoreType { Self::PoS => Ok(Store::PoS( types::decode(bytes).map_err(Error::CodingError)?, )), + Self::BridgePool => Ok(Store::BridgePool( + types::decode(bytes).map_err(Error::CodingError)?, + )), } } } @@ -212,6 +241,7 @@ impl FromStr for StoreType { "account" => Ok(StoreType::Account), "ibc" => Ok(StoreType::Ibc), "pos" => Ok(StoreType::PoS), + "eth_bridge_pool" => Ok(StoreType::BridgePool), _ => Err(Error::StoreType(s.to_string())), } } @@ -224,6 +254,7 @@ impl fmt::Display for StoreType { StoreType::Account => write!(f, "account"), StoreType::Ibc => write!(f, "ibc"), StoreType::PoS => write!(f, "pos"), + StoreType::BridgePool => write!(f, "eth_bridge_pool"), } } } @@ -235,6 +266,7 @@ pub struct MerkleTree { account: Smt, ibc: Amt, pos: Smt, + bridge_pool: BridgePoolTree, } impl core::fmt::Debug for MerkleTree { @@ -253,11 +285,14 @@ impl MerkleTree { let account = Smt::new(stores.account.0.into(), stores.account.1); let ibc = Amt::new(stores.ibc.0.into(), stores.ibc.1); let pos = Smt::new(stores.pos.0.into(), stores.pos.1); + let bridge_pool = + BridgePoolTree::new(stores.bridge_pool.0, stores.bridge_pool.1); Self { base, account, ibc, pos, + bridge_pool, } } @@ -267,6 +302,7 @@ impl MerkleTree { StoreType::Account => Box::new(&self.account), StoreType::Ibc => Box::new(&self.ibc), StoreType::PoS => Box::new(&self.pos), + StoreType::BridgePool => Box::new(&self.bridge_pool), } } @@ -279,6 +315,7 @@ impl MerkleTree { StoreType::Account => Box::new(&mut self.account), StoreType::Ibc => Box::new(&mut self.ibc), StoreType::PoS => Box::new(&mut self.pos), + StoreType::BridgePool => Box::new(&mut self.bridge_pool), } } @@ -334,6 +371,10 @@ impl MerkleTree { account: (self.account.root().into(), self.account.store()), ibc: (self.ibc.root().into(), self.ibc.store()), pos: (self.pos.root().into(), self.pos.store()), + bridge_pool: ( + self.bridge_pool.root().into(), + self.bridge_pool.store(), + ), } } @@ -461,6 +502,7 @@ pub struct MerkleTreeStoresRead { account: (Hash, SmtStore), ibc: (Hash, AmtStore), pos: (Hash, SmtStore), + bridge_pool: (KeccakHash, BridgePoolStore), } impl MerkleTreeStoresRead { @@ -471,6 +513,7 @@ impl MerkleTreeStoresRead { StoreType::Account => self.account.0 = root, StoreType::Ibc => self.ibc.0 = root, StoreType::PoS => self.pos.0 = root, + StoreType::BridgePool => self.bridge_pool.0 = root.into(), } } @@ -481,6 +524,18 @@ 
impl MerkleTreeStoresRead { Store::Account(store) => self.account.1 = store, Store::Ibc(store) => self.ibc.1 = store, Store::PoS(store) => self.pos.1 = store, + Store::BridgePool(store) => self.bridge_pool.1 = store, + } + } + + /// Read the backing store of the requested type + pub fn get_store(&self, store_type: StoreType) -> StoreRef { + match store_type { + StoreType::Base => StoreRef::Base(&self.base.1), + StoreType::Account => StoreRef::Account(&self.account.1), + StoreType::Ibc => StoreRef::Ibc(&self.ibc.1), + StoreType::PoS => StoreRef::PoS(&self.pos.1), + StoreType::BridgePool => StoreRef::BridgePool(&self.bridge_pool.1), } } } @@ -491,6 +546,7 @@ pub struct MerkleTreeStoresWrite<'a> { account: (Hash, &'a SmtStore), ibc: (Hash, &'a AmtStore), pos: (Hash, &'a SmtStore), + bridge_pool: (Hash, &'a BridgePoolStore), } impl<'a> MerkleTreeStoresWrite<'a> { @@ -501,6 +557,7 @@ impl<'a> MerkleTreeStoresWrite<'a> { StoreType::Account => &self.account.0, StoreType::Ibc => &self.ibc.0, StoreType::PoS => &self.pos.0, + StoreType::BridgePool => &self.bridge_pool.0, } } @@ -511,6 +568,7 @@ impl<'a> MerkleTreeStoresWrite<'a> { StoreType::Account => StoreRef::Account(self.account.1), StoreType::Ibc => StoreRef::Ibc(self.ibc.1), StoreType::PoS => StoreRef::PoS(self.pos.1), + StoreType::BridgePool => StoreRef::BridgePool(self.bridge_pool.1), } } } @@ -527,18 +585,6 @@ impl From for Error { } } -/// Type of membership proof from a merkle tree -pub enum MembershipProof { - /// ICS23 compliant membership proof - ICS23(CommitmentProof), -} - -impl From for MembershipProof { - fn from(proof: CommitmentProof) -> Self { - Self::ICS23(proof) - } -} - /// A storage key existence or non-existence proof #[derive(Debug)] pub struct Proof { @@ -718,12 +764,16 @@ mod test { tree.update(&pos_key, pos_val).unwrap(); let specs = ibc_proof_specs::(); - let MembershipProof::ICS23(proof) = tree + let proof = match tree .get_sub_tree_existence_proof( std::array::from_ref(&ibc_key), vec![&ibc_val], ) - .unwrap(); + .unwrap() + { + MembershipProof::ICS23(proof) => proof, + _ => panic!("Test failed"), + }; let proof = tree.get_sub_tree_proof(&ibc_key, proof).unwrap(); let (store_type, sub_key) = StoreType::sub_key(&ibc_key).unwrap(); let paths = vec![sub_key.to_string(), store_type.to_string()]; @@ -775,12 +825,17 @@ mod test { tree.update(&pos_key, pos_val.clone()).unwrap(); let specs = proof_specs::(); - let MembershipProof::ICS23(proof) = tree + let proof = match tree .get_sub_tree_existence_proof( std::array::from_ref(&pos_key), vec![&pos_val], ) - .unwrap(); + .unwrap() + { + MembershipProof::ICS23(proof) => proof, + _ => panic!("Test failed"), + }; + let proof = tree.get_sub_tree_proof(&pos_key, proof).unwrap(); let (store_type, sub_key) = StoreType::sub_key(&pos_key).unwrap(); let paths = vec![sub_key.to_string(), store_type.to_string()]; diff --git a/core/src/ledger/storage/mockdb.rs b/core/src/ledger/storage/mockdb.rs index 011d8faac8..950084acc8 100644 --- a/core/src/ledger/storage/mockdb.rs +++ b/core/src/ledger/storage/mockdb.rs @@ -352,12 +352,15 @@ impl DB for MockDB { fn read_subspace_val_with_height( &self, - _key: &Key, + key: &Key, _height: BlockHeight, _last_height: BlockHeight, ) -> Result>> { - // Mock DB can read only the latest value for now - unimplemented!() + tracing::warn!( + "read_subspace_val_with_height is not implemented, will read \ + subspace value from latest height" + ); + self.read_subspace_val(key) } fn write_subspace_val( diff --git a/core/src/ledger/storage/mod.rs 
b/core/src/ledger/storage/mod.rs
index 8285e58bab..fadf781ff7 100644
--- a/core/src/ledger/storage/mod.rs
+++ b/core/src/ledger/storage/mod.rs
@@ -17,8 +17,7 @@ use masp_primitives::merkle_tree::FrozenCommitmentTree;
 use masp_primitives::sapling::Node;
 use merkle_tree::StorageBytes;
 pub use merkle_tree::{
-    MembershipProof, MerkleTree, MerkleTreeStoresRead, MerkleTreeStoresWrite,
-    StoreType,
+    MerkleTree, MerkleTreeStoresRead, MerkleTreeStoresWrite, StoreType,
 };
 #[cfg(feature = "wasm-runtime")]
 use rayon::iter::{
@@ -620,8 +619,12 @@ where
         (self.block.hash.clone(), BLOCK_HASH_LENGTH as _)
     }
 
-    /// Get the existence proof
-    #[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))]
+    /// Get a Tendermint-compatible existence proof.
+    ///
+    /// Proofs from the Ethereum bridge pool are not
+    /// Tendermint-compatible. Requesting a key
+    /// belonging to the bridge pool will cause this
+    /// method to error.
     pub fn get_existence_proof(
         &self,
         key: &Key,
@@ -630,30 +633,42 @@ where
     ) -> Result<Proof> {
         use std::array;
 
+        use crate::types::storage::MembershipProof;
+
         if height >= self.get_block_height().0 {
-            let MembershipProof::ICS23(proof) = self
+            if let MembershipProof::ICS23(proof) = self
                 .block
                 .tree
                 .get_sub_tree_existence_proof(array::from_ref(key), vec![value])
-                .map_err(Error::MerkleTreeError)?;
-            self.block
-                .tree
-                .get_sub_tree_proof(key, proof)
-                .map(Into::into)
-                .map_err(Error::MerkleTreeError)
+                .map_err(Error::MerkleTreeError)?
+            {
+                self.block
+                    .tree
+                    .get_sub_tree_proof(key, proof)
+                    .map(Into::into)
+                    .map_err(Error::MerkleTreeError)
+            } else {
+                Err(Error::MerkleTreeError(MerkleTreeError::TendermintProof))
+            }
         } else {
             match self.db.read_merkle_tree_stores(height)? {
                 Some(stores) => {
                     let tree = MerkleTree::<H>::new(stores);
-                    let MembershipProof::ICS23(proof) = tree
+                    if let MembershipProof::ICS23(proof) = tree
                         .get_sub_tree_existence_proof(
                             array::from_ref(key),
                             vec![value],
                         )
-                        .map_err(Error::MerkleTreeError)?;
-                    tree.get_sub_tree_proof(key, proof)
-                        .map(Into::into)
-                        .map_err(Error::MerkleTreeError)
+                        .map_err(Error::MerkleTreeError)?
+ { + tree.get_sub_tree_proof(key, proof) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } else { + Err(Error::MerkleTreeError( + MerkleTreeError::TendermintProof, + )) + } } None => Err(Error::NoMerkleTree { height }), } @@ -661,7 +676,6 @@ where } /// Get the non-existence proof - #[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] pub fn get_non_existence_proof( &self, key: &Key, @@ -1279,6 +1293,7 @@ mod tests { ..Default::default() }; let mut parameters = Parameters { + max_proposal_bytes: Default::default(), epoch_duration: epoch_duration.clone(), max_expected_time_per_block: Duration::seconds(max_expected_time_per_block).into(), vp_whitelist: vec![], diff --git a/core/src/ledger/storage/traits.rs b/core/src/ledger/storage/traits.rs index dc5c18a4a3..6e109ee53e 100644 --- a/core/src/ledger/storage/traits.rs +++ b/core/src/ledger/storage/traits.rs @@ -5,15 +5,20 @@ use std::fmt; use arse_merkle_tree::traits::{Hasher, Value}; use arse_merkle_tree::{Key as TreeKey, H256}; +use borsh::BorshDeserialize; use ics23::commitment_proof::Proof as Ics23Proof; use ics23::{CommitmentProof, ExistenceProof}; use sha2::{Digest, Sha256}; use super::ics23_specs; -use super::merkle_tree::{Amt, Error, MembershipProof, Smt}; +use super::merkle_tree::{Amt, Error, Smt}; +use crate::ledger::eth_bridge::storage::bridge_pool::BridgePoolTree; use crate::ledger::storage::merkle_tree::StorageBytes; +use crate::types::eth_bridge_pool::PendingTransfer; use crate::types::hash::Hash; -use crate::types::storage::{Key, StringKey, TreeBytes}; +use crate::types::storage::{ + Key, MembershipProof, StringKey, TreeBytes, IBC_KEY_LIMIT, +}; /// Trait for reading from a merkle tree that is a sub-tree /// of the global merkle tree. @@ -154,6 +159,112 @@ impl<'a, H: StorageHasher + Default> SubTreeWrite for &'a mut Amt { } } +impl<'a> SubTreeRead for &'a BridgePoolTree { + fn subtree_has_key(&self, key: &Key) -> Result { + self.contains_key(key) + .map_err(|err| Error::MerkleTree(err.to_string())) + } + + fn subtree_membership_proof( + &self, + _: &[Key], + values: Vec, + ) -> Result { + let values = values + .iter() + .filter_map(|val| PendingTransfer::try_from_slice(val).ok()) + .collect(); + self.get_membership_proof(values) + .map(Into::into) + .map_err(|err| Error::MerkleTree(err.to_string())) + } +} + +impl<'a> SubTreeWrite for &'a mut BridgePoolTree { + fn subtree_update( + &mut self, + key: &Key, + _: StorageBytes, + ) -> Result { + self.insert_key(key) + .map_err(|err| Error::MerkleTree(err.to_string())) + } + + fn subtree_delete(&mut self, key: &Key) -> Result { + self.delete_key(key) + .map_err(|err| Error::MerkleTree(err.to_string()))?; + Ok(self.root().into()) + } +} + +impl TreeKey for StringKey { + type Error = Error; + + fn as_slice(&self) -> &[u8] { + &self.original.as_slice()[..self.length] + } + + fn try_from_bytes(bytes: &[u8]) -> Result { + let mut tree_key = [0u8; IBC_KEY_LIMIT]; + let mut original = [0u8; IBC_KEY_LIMIT]; + let mut length = 0; + for (i, byte) in bytes.iter().enumerate() { + if i >= IBC_KEY_LIMIT { + return Err(Error::InvalidMerkleKey( + "Input IBC key is too large".into(), + )); + } + original[i] = *byte; + tree_key[i] = byte.wrapping_add(1); + length += 1; + } + Ok(Self { + original, + tree_key: tree_key.into(), + length, + }) + } +} + +impl Value for Hash { + fn as_slice(&self) -> &[u8] { + self.0.as_slice() + } + + fn zero() -> Self { + Hash([0u8; 32]) + } +} + +impl From for H256 { + fn from(hash: Hash) -> Self { + hash.0.into() + } +} + +impl From for Hash { + 
fn from(hash: H256) -> Self {
+        Self(hash.into())
+    }
+}
+
+impl From<&H256> for Hash {
+    fn from(hash: &H256) -> Self {
+        let hash = hash.to_owned();
+        Self(hash.into())
+    }
+}
+
+impl Value for TreeBytes {
+    fn as_slice(&self) -> &[u8] {
+        self.0.as_slice()
+    }
+
+    fn zero() -> Self {
+        TreeBytes::zero()
+    }
+}
+
 /// The storage hasher used for the merkle tree.
 pub trait StorageHasher: Hasher + Default {
     /// Hash the value to store
diff --git a/core/src/lib.rs b/core/src/lib.rs
index c9bd40084e..44ca420409 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -7,6 +7,7 @@
 #![deny(rustdoc::private_intra_doc_links)]
 
 pub mod bytes;
+pub mod hints;
 pub mod ledger;
 pub mod proto;
 pub mod types;
diff --git a/core/src/proto/mod.rs b/core/src/proto/mod.rs
index 3271037595..215e76ac45 100644
--- a/core/src/proto/mod.rs
+++ b/core/src/proto/mod.rs
@@ -3,7 +3,7 @@
 pub mod generated;
 mod types;
 
-pub use types::{Dkg, Error, Signed, SignedTxData, Tx};
+pub use types::{Dkg, Error, Signed, SignedSerialize, SignedTxData, Tx};
 
 #[cfg(test)]
 mod tests {
diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs
index d7fbb49aad..a34fddf414 100644
--- a/core/src/proto/types.rs
+++ b/core/src/proto/types.rs
@@ -1,6 +1,9 @@
+use std::collections::HashMap;
 use std::convert::{TryFrom, TryInto};
 use std::hash::{Hash, Hasher};
+use std::marker::PhantomData;
 
+use borsh::schema::{Declaration, Definition};
 use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
 use prost::Message;
 use serde::{Deserialize, Serialize};
@@ -53,61 +56,109 @@ pub struct SignedTxData {
     pub sig: common::Signature,
 }
 
-/// A generic signed data wrapper for Borsh encode-able data.
+/// A serialization method to provide to [`Signed`], such
+/// that we may sign serialized data.
+pub trait SignedSerialize<T> {
+    /// A byte vector containing the serialized data.
+    type Output: AsRef<[u8]>;
+
+    /// Encodes `data` as a byte vector, with some arbitrary serialization
+    /// method.
+    ///
+    /// The returned output *must* be deterministic based on
+    /// `data`, so that two callers signing the same `data` will be
+    /// signing the same `Self::Output`.
+    fn serialize(data: &T) -> Self::Output;
+}
+
+/// Tag type that indicates we should use [`BorshSerialize`]
+/// to sign data in a [`Signed`] wrapper.
+#[derive(Eq, PartialEq, Clone, Debug, Serialize, Deserialize)]
+pub struct SerializeWithBorsh;
+
+impl<T: BorshSerialize> SignedSerialize<T> for SerializeWithBorsh {
+    type Output = Vec<u8>;
+
+    fn serialize(data: &T) -> Vec<u8> {
+        data.try_to_vec()
+            .expect("Encoding data for signing shouldn't fail")
+    }
+}
+
+/// A generic signed data wrapper for serialize-able types.
+///
+/// The default serialization method is [`BorshSerialize`].
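+///
+/// A minimal signing sketch (assuming `keypair` and `pk` are a matching
+/// `common::SecretKey` / `common::PublicKey` pair, and that the
+/// serialization type parameter defaults to [`SerializeWithBorsh`]):
+///
+/// ```ignore
+/// let signed: Signed<Vec<u8>> = Signed::new(&keypair, vec![1u8, 2, 3]);
+/// assert!(signed.verify(&pk).is_ok());
+/// ```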
 #[derive(
     Clone, Debug, BorshSerialize, BorshDeserialize, Serialize, Deserialize,
 )]
-pub struct Signed<T: BorshSerialize + BorshDeserialize> {
+pub struct Signed<T, S = SerializeWithBorsh> {
     /// Arbitrary data to be signed
     pub data: T,
     /// The signature of the data
     pub sig: common::Signature,
+    /// The method to serialize the data with,
+    /// before it being signed
+    _serialization: PhantomData<S>,
 }
 
-impl<T> PartialEq for Signed<T>
-where
-    T: BorshSerialize + BorshDeserialize + PartialEq,
-{
+impl<S, T: Eq> Eq for Signed<T, S> {}
+
+impl<S, T: PartialEq> PartialEq for Signed<T, S> {
     fn eq(&self, other: &Self) -> bool {
         self.data == other.data && self.sig == other.sig
     }
 }
 
-impl<T> Eq for Signed<T> where
-    T: BorshSerialize + BorshDeserialize + Eq + PartialEq
-{
-}
-
-impl<T> Hash for Signed<T>
-where
-    T: BorshSerialize + BorshDeserialize + Hash,
-{
+impl<S, T: Hash> Hash for Signed<T, S> {
     fn hash<H: Hasher>(&self, state: &mut H) {
         self.data.hash(state);
         self.sig.hash(state);
     }
 }
 
-impl<T> PartialOrd for Signed<T>
-where
-    T: BorshSerialize + BorshDeserialize + PartialOrd,
-{
+impl<S, T: PartialOrd> PartialOrd for Signed<T, S> {
     fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
         self.data.partial_cmp(&other.data)
     }
 }
 
-impl<T> Signed<T>
-where
-    T: BorshSerialize + BorshDeserialize,
-{
-    /// Initialize a new signed data.
+impl<T: BorshSchema, S> BorshSchema for Signed<T, S> {
+    fn add_definitions_recursively(
+        definitions: &mut HashMap<Declaration, Definition>,
+    ) {
+        let fields = borsh::schema::Fields::NamedFields(borsh::maybestd::vec![
+            ("data".to_string(), T::declaration()),
+            ("sig".to_string(), <common::Signature>::declaration())
+        ]);
+        let definition = borsh::schema::Definition::Struct { fields };
+        Self::add_definition(Self::declaration(), definition, definitions);
+        T::add_definitions_recursively(definitions);
+        <common::Signature>::add_definitions_recursively(definitions);
+    }
+
+    fn declaration() -> borsh::schema::Declaration {
+        format!("Signed<{}>", T::declaration())
+    }
+}
+
+impl<T, S> Signed<T, S> {
+    /// Initialize a new [`Signed`] instance from an existing signature.
+    #[inline]
+    pub fn new_from(data: T, sig: common::Signature) -> Self {
+        Self {
+            data,
+            sig,
+            _serialization: PhantomData,
+        }
+    }
+}
+
+impl<T, S: SignedSerialize<T>> Signed<T, S> {
+    /// Initialize a new [`Signed`] instance.
     pub fn new(keypair: &common::SecretKey, data: T) -> Self {
-        let to_sign = data
-            .try_to_vec()
-            .expect("Encoding data for signing shouldn't fail");
-        let sig = common::SigScheme::sign(keypair, to_sign);
-        Self { data, sig }
+        let to_sign = S::serialize(&data);
+        let sig = common::SigScheme::sign(keypair, to_sign.as_ref());
+        Self::new_from(data, sig)
     }
 
     /// Verify that the data has been signed by the secret key
@@ -116,11 +167,8 @@ where
         &self,
         pk: &common::PublicKey,
     ) -> std::result::Result<(), VerifySigError> {
-        let bytes = self
-            .data
-            .try_to_vec()
-            .expect("Encoding data for verifying signature shouldn't fail");
-        common::SigScheme::verify_signature_raw(pk, &bytes, &self.sig)
+        let bytes = S::serialize(&self.data);
+        common::SigScheme::verify_signature_raw(pk, bytes.as_ref(), &self.sig)
     }
 }
diff --git a/core/src/types/address.rs b/core/src/types/address.rs
index 79a27a440f..6bc09aa391 100644
--- a/core/src/types/address.rs
+++ b/core/src/types/address.rs
@@ -12,6 +12,7 @@ use serde::{Deserialize, Serialize};
 use sha2::{Digest, Sha256};
 use thiserror::Error;
 
+use crate::types::ethereum_events::EthAddress;
 use crate::types::key;
 use crate::types::key::PublicKeyHash;
 
@@ -69,6 +70,8 @@ mod internal {
         "ibc::IBC Mint Address                                ";
     pub const ETH_BRIDGE: &str =
         "ano::ETH Bridge Address                              ";
+    pub const ETH_BRIDGE_POOL: &str =
+        "ano::ETH Bridge Pool Address                         ";
 }
 
 /// Fixed-length address strings prefix for established addresses.
@@ -198,6 +201,9 @@ impl Address { InternalAddress::EthBridge => { internal::ETH_BRIDGE.to_string() } + InternalAddress::EthBridgePool => { + internal::ETH_BRIDGE_POOL.to_string() + } }; debug_assert_eq!(string.len(), FIXED_LEN_STRING_BYTES); string @@ -251,6 +257,9 @@ impl Address { internal::ETH_BRIDGE => { Ok(Address::Internal(InternalAddress::EthBridge)) } + internal::ETH_BRIDGE_POOL => { + Ok(Address::Internal(InternalAddress::EthBridgePool)) + } _ => Err(Error::new( ErrorKind::InvalidData, "Invalid internal address", @@ -466,6 +475,8 @@ pub enum InternalAddress { SlashFund, /// Bridge to Ethereum EthBridge, + /// The pool of transactions to be relayed to Ethereum + EthBridgePool, } impl InternalAddress { @@ -500,6 +511,7 @@ impl Display for InternalAddress { Self::IbcBurn => "IbcBurn".to_string(), Self::IbcMint => "IbcMint".to_string(), Self::EthBridge => "EthBridge".to_string(), + Self::EthBridgePool => "EthBridgePool".to_string(), } ) } @@ -556,6 +568,16 @@ pub fn masp_tx_key() -> crate::types::key::common::SecretKey { common::SecretKey::try_from_slice(bytes.as_ref()).unwrap() } +/// Temporary helper for testing +pub const fn wnam() -> EthAddress { + // TODO: Replace this with the real wNam ERC20 address once it exists + // "DEADBEEF DEADBEEF DEADBEEF DEADBEEF DEADBEEF" + EthAddress([ + 222, 173, 190, 239, 222, 173, 190, 239, 222, 173, 190, 239, 222, 173, + 190, 239, 222, 173, 190, 239, + ]) +} + /// Temporary helper for testing, a hash map of tokens addresses with their /// informal currency codes. pub fn tokens() -> HashMap { @@ -771,8 +793,9 @@ pub mod testing { InternalAddress::IbcEscrow => {} InternalAddress::IbcBurn => {} InternalAddress::IbcMint => {} - InternalAddress::EthBridge => {} /* Add new addresses in the - * `prop_oneof` below. */ + InternalAddress::EthBridge => {} + InternalAddress::EthBridgePool => {} /* Add new addresses in the + * `prop_oneof` below. */ }; prop_oneof![ Just(InternalAddress::PoS), @@ -787,6 +810,7 @@ pub mod testing { Just(InternalAddress::Governance), Just(InternalAddress::SlashFund), Just(InternalAddress::EthBridge), + Just(InternalAddress::EthBridgePool), ] } diff --git a/core/src/types/chain.rs b/core/src/types/chain.rs index 06a5d3938c..7437793cfc 100644 --- a/core/src/types/chain.rs +++ b/core/src/types/chain.rs @@ -1,10 +1,11 @@ //! Chain related data types // TODO move BlockHash and BlockHeight here from the storage types -use std::fmt::Display; +use std::fmt; +use std::num::NonZeroU64; use std::str::FromStr; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use thiserror::Error; @@ -16,6 +17,165 @@ pub const CHAIN_ID_PREFIX_MAX_LEN: usize = 19; /// Separator between chain ID prefix and the generated hash pub const CHAIN_ID_PREFIX_SEP: char = '.'; +/// Configuration parameter for the upper limit on the number +/// of bytes transactions can occupy in a block proposal. 
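+///
+/// A short sketch of the bounds this type enforces (the values come from
+/// the constants defined further down in this file):
+///
+/// ```ignore
+/// // the default limit is 21 MiB, well below the 90 MiB ceiling
+/// assert_eq!(ProposalBytes::default().get(), 21 << 20);
+/// assert_eq!(ProposalBytes::MAX.get(), 90 << 20);
+/// ```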
+#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Hash, + Debug, + BorshSerialize, + BorshDeserialize, +)] +pub struct ProposalBytes { + inner: NonZeroU64, +} + +impl Serialize for ProposalBytes { + fn serialize(&self, s: S) -> Result + where + S: serde::Serializer, + { + s.serialize_u64(self.inner.get()) + } +} + +impl<'de> Deserialize<'de> for ProposalBytes { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + struct Visitor; + + impl<'de> serde::de::Visitor<'de> for Visitor { + type Value = ProposalBytes; + + fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "a u64 in the range 1 - {}", + ProposalBytes::RAW_MAX.get() + ) + } + + fn visit_u64(self, size: u64) -> Result + where + E: serde::de::Error, + { + ProposalBytes::new(size).ok_or_else(|| { + serde::de::Error::invalid_value( + serde::de::Unexpected::Unsigned(size), + &self, + ) + }) + } + + // NOTE: this is only needed because of a bug in the toml parser + // https://github.com/toml-rs/toml-rs/issues/256 + fn visit_i64(self, size: i64) -> Result + where + E: serde::de::Error, + { + ProposalBytes::new(size as u64).ok_or_else(|| { + serde::de::Error::invalid_value( + serde::de::Unexpected::Signed(size), + &self, + ) + }) + } + } + + deserializer.deserialize_u64(Visitor) + } +} + +impl BorshSchema for ProposalBytes { + fn add_definitions_recursively( + definitions: &mut std::collections::HashMap< + borsh::schema::Declaration, + borsh::schema::Definition, + >, + ) { + let fields = borsh::schema::Fields::NamedFields(vec![( + "inner".into(), + u64::declaration(), + )]); + let definition = borsh::schema::Definition::Struct { fields }; + definitions.insert(Self::declaration(), definition); + } + + fn declaration() -> borsh::schema::Declaration { + std::any::type_name::().into() + } +} + +impl Default for ProposalBytes { + #[inline] + fn default() -> Self { + Self { + inner: Self::RAW_DEFAULT, + } + } +} + +// constants +impl ProposalBytes { + /// The upper bound of a [`ProposalBytes`] value. + pub const MAX: ProposalBytes = ProposalBytes { + inner: Self::RAW_MAX, + }; + /// The (raw) default value for a [`ProposalBytes`]. + /// + /// This value must be within the range `[1 B, 90 MiB]`. + const RAW_DEFAULT: NonZeroU64 = unsafe { + // SAFETY: We are constructing a greater than zero + // value, so the API contract is never violated. + // Moreover, 21 MiB <= 90 MiB. + NonZeroU64::new_unchecked(21 << 20) + }; + /// The (raw) upper bound of a [`ProposalBytes`] value. + /// + /// The maximum space a serialized Tendermint block can + /// occupy is 100 MiB. We reserve 10 MiB for serialization + /// overhead, evidence and header data, and 90 MiB for + /// tx data. + const RAW_MAX: NonZeroU64 = unsafe { + // SAFETY: We are constructing a greater than zero + // value, so the API contract is never violated. + NonZeroU64::new_unchecked(90 << 20) + }; +} + +impl ProposalBytes { + /// Return the number of bytes as a [`u64`] value. + #[inline] + pub const fn get(self) -> u64 { + self.inner.get() + } + + /// Try to construct a new [`ProposalBytes`] instance, + /// from the given `max_bytes` value. + /// + /// This function will return [`None`] if `max_bytes` is not within + /// the inclusive range of 1 to [`ProposalBytes::MAX`]. 
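+    ///
+    /// For example (a sketch of the expected behavior):
+    ///
+    /// ```ignore
+    /// assert!(ProposalBytes::new(0).is_none());
+    /// assert!(ProposalBytes::new(1).is_some());
+    /// assert!(ProposalBytes::new(ProposalBytes::MAX.get() + 1).is_none());
+    /// ```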
+ #[inline] + pub fn new(max_bytes: u64) -> Option { + NonZeroU64::new(max_bytes) + .map(|inner| Self { inner }) + .and_then(|value| { + if value.get() > Self::RAW_MAX.get() { + None + } else { + Some(value) + } + }) + } +} + /// Development default chain ID. Must be [`CHAIN_ID_LENGTH`] long. #[cfg(feature = "dev")] pub const DEFAULT_CHAIN_ID: &str = "namada-devchain.00000000000000"; @@ -110,7 +270,7 @@ impl Default for ChainId { } } -impl Display for ChainId { +impl fmt::Display for ChainId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0) } @@ -158,7 +318,7 @@ impl FromStr for ChainId { #[serde(transparent)] pub struct ChainIdPrefix(String); -impl Display for ChainIdPrefix { +impl fmt::Display for ChainIdPrefix { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0) } @@ -234,5 +394,16 @@ mod tests { let errors = chain_id.validate(&genesis_bytes); assert!(errors.is_empty(), "There should be no validation errors {:#?}", errors); } + + /// Test if [`ProposalBytes`] serde serialization is correct. + #[test] + fn test_proposal_size_serialize_roundtrip(s in 1u64..=ProposalBytes::MAX.get()) { + let size = ProposalBytes::new(s).expect("Test failed"); + assert_eq!(size.get(), s); + let json = serde_json::to_string(&size).expect("Test failed"); + let deserialized: ProposalBytes = + serde_json::from_str(&json).expect("Test failed"); + assert_eq!(size, deserialized); + } } } diff --git a/core/src/types/eth_abi.rs b/core/src/types/eth_abi.rs new file mode 100644 index 0000000000..75f0dd1f25 --- /dev/null +++ b/core/src/types/eth_abi.rs @@ -0,0 +1,154 @@ +//! This module defines encoding methods compatible with Ethereum +//! smart contracts. + +use std::marker::PhantomData; + +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +#[doc(inline)] +pub use ethabi::token::Token; +use tiny_keccak::{Hasher, Keccak}; + +use crate::types::keccak::{keccak_hash, KeccakHash}; + +/// A container for data types that are able to be Ethereum ABI-encoded. +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshSchema)] +#[repr(transparent)] +pub struct EncodeCell { + /// ABI-encoded value of type `T`. + encoded_data: Vec, + /// Indicate we do not own values of type `T`. + /// + /// Passing `PhantomData` here would trigger the drop checker, + /// which is not the desired behavior, since we own an encoded value + /// of `T`, not a value of `T` itself. + _marker: PhantomData<*const T>, +} + +impl EncodeCell { + /// Return a new ABI encoded value of type `T`. + pub fn new(value: &T) -> Self + where + T: Encode, + { + let encoded_data = { + let tokens = value.tokenize(); + ethabi::encode(tokens.as_slice()) + }; + Self { + encoded_data, + _marker: PhantomData, + } + } + + /// Return the underlying ABI encoded value. + pub fn into_inner(self) -> Vec { + self.encoded_data + } +} + +/// Contains a method to encode data to a format compatible with Ethereum. +pub trait Encode: Sized { + /// Encodes a struct into a sequence of ABI + /// [`Token`] instances. + fn tokenize(&self) -> [Token; N]; + + /// Returns the encoded [`Token`] instances, in a type-safe enclosure. + fn encode(&self) -> EncodeCell { + EncodeCell::new(self) + } + + /// Encodes a slice of [`Token`] instances, and returns the + /// keccak hash of the encoded string. 
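+    ///
+    /// A usage sketch with the `AbiEncode` helper defined at the end of
+    /// this file (hypothetical token values):
+    ///
+    /// ```ignore
+    /// let tokens = [Token::Uint(42u64.into()), Token::String("test".into())];
+    /// let hash: KeccakHash = tokens.keccak256();
+    /// ```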
+    fn keccak256(&self) -> KeccakHash {
+        keccak_hash(self.encode().into_inner().as_slice())
+    }
+
+    /// Encodes a slice of [`Token`] instances, and returns the
+    /// keccak hash of the encoded string appended to an Ethereum
+    /// signature header.
+    fn signed_keccak256(&self) -> KeccakHash {
+        let mut output = [0; 32];
+
+        let eth_message = {
+            let message = self.encode().into_inner();
+
+            let mut eth_message =
+                format!("\x19Ethereum Signed Message:\n{}", message.len())
+                    .into_bytes();
+            eth_message.extend_from_slice(&message);
+            eth_message
+        };
+
+        let mut state = Keccak::v256();
+        state.update(&eth_message);
+        state.finalize(&mut output);
+
+        KeccakHash(output)
+    }
+}
+
+/// Represents an Ethereum encoding method equivalent
+/// to `abi.encode`.
+pub type AbiEncode<const N: usize> = [Token; N];
+
+impl<const N: usize> Encode<N> for AbiEncode<N> {
+    #[inline]
+    fn tokenize(&self) -> [Token; N] {
+        self.clone()
+    }
+}
+
+// TODO: test signatures here once we merge secp keys
+#[cfg(test)]
+mod tests {
+    use std::convert::TryInto;
+
+    use data_encoding::HEXLOWER;
+    use ethabi::ethereum_types::U256;
+
+    use super::*;
+
+    /// Checks if we get the same result as `abi.encode`, for some given
+    /// input data.
+    #[test]
+    fn test_abi_encode() {
+        let expected = "0x000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000047465737400000000000000000000000000000000000000000000000000000000";
+        let expected = HEXLOWER
+            .decode(&expected.as_bytes()[2..])
+            .expect("Test failed");
+        let got = AbiEncode::encode(&[
+            Token::Uint(U256::from(42u64)),
+            Token::String("test".into()),
+        ]);
+        assert_eq!(expected, got.into_inner());
+    }
+
+    /// Sanity check our keccak hash implementation.
+    #[test]
+    fn test_keccak_hash_impl() {
+        let expected =
+            "1c8aff950685c2ed4bc3174f3472287b56d9517b9c948127319a09a7a36deac8";
+        assert_eq!(
+            expected,
+            &HEXLOWER.encode(
+                &{
+                    let mut st = Keccak::v256();
+                    let mut output = [0; 32];
+                    st.update(b"hello");
+                    st.finalize(&mut output);
+                    output
+                }[..]
+            )
+        );
+    }
+
+    /// Test that the methods for converting a keccak hash to/from
+    /// a string type are inverses.
+    #[test]
+    fn test_hex_roundtrip() {
+        let original =
+            "1C8AFF950685C2ED4BC3174F3472287B56D9517B9C948127319A09A7A36DEAC8";
+        let keccak_hash: KeccakHash = original.try_into().expect("Test failed");
+        assert_eq!(keccak_hash.to_string().as_str(), original);
+    }
+}
diff --git a/core/src/types/eth_bridge_pool.rs b/core/src/types/eth_bridge_pool.rs
new file mode 100644
index 0000000000..aa2cfebc4a
--- /dev/null
+++ b/core/src/types/eth_bridge_pool.rs
@@ -0,0 +1,177 @@
+//! The necessary type definitions for the contents of the
+//! Ethereum bridge pool
+use std::collections::BTreeSet;
+
+use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
+use ethabi::token::Token;
+use serde::{Deserialize, Serialize};
+
+use crate::ledger::eth_bridge::storage::bridge_pool::BridgePoolProof;
+use crate::types::address::Address;
+use crate::types::eth_abi::Encode;
+use crate::types::ethereum_events::{EthAddress, Uint};
+use crate::types::keccak::KeccakHash;
+use crate::types::storage::{BlockHeight, DbKeySeg, Key};
+use crate::types::token::Amount;
+use crate::types::vote_extensions::validator_set_update::ValidatorSetArgs;
+
+/// A namespace used in our Ethereum smart contracts
+const NAMESPACE: &str = "transfer";
+
+/// A transfer message to be submitted to Ethereum
+/// to move assets from Namada across the bridge.
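+///
+/// A construction sketch (hypothetical asset/recipient addresses; `sender`
+/// is assumed to be a valid Namada [`Address`]):
+///
+/// ```ignore
+/// let transfer = TransferToEthereum {
+///     asset: EthAddress([0xaa; 20]),
+///     recipient: EthAddress([0xbb; 20]),
+///     sender,
+///     amount: 10u64.into(),
+///     nonce: 1u64.into(),
+/// };
+/// ```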
+#[derive(
+    Debug,
+    Clone,
+    Hash,
+    PartialOrd,
+    PartialEq,
+    Ord,
+    Eq,
+    Serialize,
+    Deserialize,
+    BorshSerialize,
+    BorshDeserialize,
+    BorshSchema,
+)]
+pub struct TransferToEthereum {
+    /// The type of token
+    pub asset: EthAddress,
+    /// The recipient address
+    pub recipient: EthAddress,
+    /// The sender of the transfer
+    pub sender: Address,
+    /// The amount to be transferred
+    pub amount: Amount,
+    /// A nonce for replay protection
+    pub nonce: Uint,
+}
+
+/// A transfer message to Ethereum sitting in the
+/// bridge pool, waiting to be relayed
+#[derive(
+    Debug,
+    Clone,
+    Hash,
+    PartialOrd,
+    PartialEq,
+    Ord,
+    Eq,
+    Serialize,
+    Deserialize,
+    BorshSerialize,
+    BorshDeserialize,
+    BorshSchema,
+)]
+pub struct PendingTransfer {
+    /// The message to send to Ethereum
+    pub transfer: TransferToEthereum,
+    /// The amount of gas fees (in NAM)
+    /// paid by the user sending this transfer
+    pub gas_fee: GasFee,
+}
+
+impl Encode<8> for PendingTransfer {
+    fn tokenize(&self) -> [Token; 8] {
+        // TODO: This version should be looked up from storage
+        let version = Token::Uint(1.into());
+        let namespace = Token::String(NAMESPACE.into());
+        let from = Token::Address(self.transfer.asset.0.into());
+        let fee = Token::Uint(u64::from(self.gas_fee.amount).into());
+        let to = Token::Address(self.transfer.recipient.0.into());
+        let amount = Token::Uint(u64::from(self.transfer.amount).into());
+        let fee_from = Token::String(self.gas_fee.payer.to_string());
+        let nonce = Token::Uint(self.transfer.nonce.clone().into());
+        [version, namespace, from, to, amount, fee, fee_from, nonce]
+    }
+}
+
+impl From<&PendingTransfer> for Key {
+    fn from(transfer: &PendingTransfer) -> Self {
+        Key {
+            segments: vec![DbKeySeg::StringSeg(
+                transfer.keccak256().to_string(),
+            )],
+        }
+    }
+}
+
+/// The amount of NAM to be paid to the relayer of
+/// a transfer across the Ethereum Bridge to compensate
+/// for Ethereum gas fees.
+#[derive(
+    Debug,
+    Clone,
+    Hash,
+    PartialOrd,
+    PartialEq,
+    Ord,
+    Eq,
+    Serialize,
+    Deserialize,
+    BorshSerialize,
+    BorshDeserialize,
+    BorshSchema,
+)]
+pub struct GasFee {
+    /// The amount of fees (in NAM)
+    pub amount: Amount,
+    /// The account of the fee payer.
+    pub payer: Address,
+}
+
+/// A Merkle root (Keccak hash) of the Ethereum
+/// bridge pool that has been signed by validators'
+/// Ethereum keys.
+#[derive(Debug, Clone, BorshSerialize, BorshDeserialize, BorshSchema)]
+pub struct MultiSignedMerkleRoot {
+    /// The signatures from validators
+    pub sigs: BTreeSet,
+    /// The Merkle root being signed
+    pub root: KeccakHash,
+    /// The block height at which this root was valid
+    pub height: BlockHeight,
+}
+
+impl Encode<2> for MultiSignedMerkleRoot {
+    fn tokenize(&self) -> [Token; 2] {
+        let MultiSignedMerkleRoot { sigs, root, .. } = self;
+        // TODO: check the tokenization of the signatures
+        let sigs = Token::Array(
+            sigs.iter().map(|sig| sig.tokenize()[0].clone()).collect(),
+        );
+        let root = Token::FixedBytes(root.0.to_vec());
+        [sigs, root]
+    }
+}
+
+/// All the information to relay to Ethereum
+/// that a set of transfers exist in the Ethereum
+/// bridge pool.
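+///
+/// A sketch of how a relayer might turn a fully populated proof into ABI
+/// calldata for the bridge contract (`proof` is a hypothetical value):
+///
+/// ```ignore
+/// let call_data: Vec<u8> = proof.encode().into_inner();
+/// ```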
+pub struct RelayProof {
+    /// Information about the signing validators
+    pub validator_args: ValidatorSetArgs,
+    /// A merkle root signed by a quorum of validators
+    pub root: MultiSignedMerkleRoot,
+    /// A membership proof
+    pub proof: BridgePoolProof,
+    /// A nonce for the batch for replay protection
+    pub nonce: Uint,
+}
+
+impl Encode<7> for RelayProof {
+    fn tokenize(&self) -> [Token; 7] {
+        let [val_set_args] = self.validator_args.tokenize();
+        let [sigs, root] = self.root.tokenize();
+        let [proof, transfers, flags] = self.proof.tokenize();
+        [
+            val_set_args,
+            sigs,
+            transfers,
+            root,
+            proof,
+            flags,
+            Token::Uint(self.nonce.clone().into()),
+        ]
+    }
+}
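A sketch of how these types fit together (not part of the changeset; the crate path and all field values are placeholders): the bridge pool is content-addressed, keying each `PendingTransfer` by its own keccak hash via the `From<&PendingTransfer> for Key` impl above.

```rust
use namada_core::types::address::testing::established_address_1;
use namada_core::types::eth_bridge_pool::{
    GasFee, PendingTransfer, TransferToEthereum,
};
use namada_core::types::ethereum_events::EthAddress;
use namada_core::types::storage::Key;
use namada_core::types::token::Amount;

fn demo_pool_key() -> Key {
    let transfer = PendingTransfer {
        transfer: TransferToEthereum {
            asset: EthAddress([0xad; 20]),     // placeholder ERC20 address
            recipient: EthAddress([0xbe; 20]), // placeholder recipient
            sender: established_address_1(),   // test address helper
            amount: Amount::from(100),
            nonce: 1u64.into(),
        },
        gas_fee: GasFee {
            amount: Amount::from(1),
            payer: established_address_1(),
        },
    };
    // a single string segment: the hex of `transfer.keccak256()`
    Key::from(&transfer)
}
```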
"0x6B175474E89094C44Da98b954EedeAC495271d0F" + fn from_str(s: &str) -> Result { + let h160 = ethabi::ethereum_types::Address::from_str(s) + .wrap_err_with(|| eyre!("couldn't parse Ethereum address {}", s))?; + Ok(Self(h160.into())) + } +} + +impl TryFrom for EthAddress { + type Error = eyre::Error; + + fn try_from(string: String) -> Result { + Self::from_str(string.as_ref()) + } +} + +impl From for String { + fn from(addr: EthAddress) -> Self { + addr.to_string() + } +} + +impl KeySeg for EthAddress { + fn parse(string: String) -> crate::types::storage::Result { + Self::from_str(string.as_str()) + .map_err(|_| crate::types::storage::Error::ParseKeySeg(string)) + } + + fn raw(&self) -> String { + self.to_canonical() + } + + fn to_db_key(&self) -> DbKeySeg { + DbKeySeg::StringSeg(self.raw()) + } +} + +/// An Ethereum event to be processed by the Namada ledger +#[derive( + PartialEq, + Eq, + PartialOrd, + Hash, + Ord, + Clone, + Debug, + BorshSerialize, + BorshDeserialize, + BorshSchema, +)] +pub enum EthereumEvent { + /// Event transferring batches of ether or Ethereum based ERC20 tokens + /// from Ethereum to wrapped assets on Namada + TransfersToNamada { + /// Monotonically increasing nonce + #[allow(dead_code)] + nonce: Uint, + /// The batch of transfers + #[allow(dead_code)] + transfers: Vec, + }, + /// A confirmation event that a batch of transfers have been made + /// from Namada to Ethereum + TransfersToEthereum { + /// Monotonically increasing nonce + #[allow(dead_code)] + nonce: Uint, + /// The batch of transfers + #[allow(dead_code)] + transfers: Vec, + }, + /// Event indication that the validator set has been updated + /// in the governance contract + ValidatorSetUpdate { + /// Monotonically increasing nonce + #[allow(dead_code)] + nonce: Uint, + /// Hash of the validators in the bridge contract + #[allow(dead_code)] + bridge_validator_hash: KeccakHash, + /// Hash of the validators in the governance contract + #[allow(dead_code)] + governance_validator_hash: KeccakHash, + }, + /// Event indication that a new smart contract has been + /// deployed + NewContract { + /// Name of the contract + #[allow(dead_code)] + name: String, + /// Address of the contract on Ethereum + #[allow(dead_code)] + address: EthAddress, + }, + /// Event indicating that a smart contract has been updated + UpgradedContract { + /// Name of the contract + #[allow(dead_code)] + name: String, + /// Address of the contract on Ethereum + #[allow(dead_code)] + address: EthAddress, + }, + /// Event indication a new Ethereum based token has been whitelisted for + /// transfer across the bridge + UpdateBridgeWhitelist { + /// Monotonically increasing nonce + #[allow(dead_code)] + nonce: Uint, + /// Tokens to be allowed to be transferred across the bridge + #[allow(dead_code)] + whitelist: Vec, + }, +} + +impl EthereumEvent { + /// SHA256 of the Borsh serialization of the [`EthereumEvent`]. 
+    pub fn hash(&self) -> Result<Hash, std::io::Error> {
+        let bytes = self.try_to_vec()?;
+        Ok(Hash::sha256(bytes))
+    }
+}
+
+/// An event transferring some kind of value from Ethereum to Namada
+#[derive(
+    Clone,
+    Debug,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Hash,
+    Ord,
+    BorshSerialize,
+    BorshDeserialize,
+    BorshSchema,
+)]
+pub struct TransferToNamada {
+    /// Quantity of the ERC20 token in the transfer
+    pub amount: Amount,
+    /// Address of the smart contract issuing the token
+    pub asset: EthAddress,
+    /// The address receiving wrapped assets on Namada
+    pub receiver: Address,
+}
+
+/// An event transferring some kind of value from Namada to Ethereum
+#[derive(
+    Clone,
+    Debug,
+    PartialEq,
+    Eq,
+    Hash,
+    PartialOrd,
+    Ord,
+    BorshSerialize,
+    BorshDeserialize,
+    BorshSchema,
+)]
+pub struct TransferToEthereum {
+    /// Quantity of wrapped Asset in the transfer
+    pub amount: Amount,
+    /// Address of the smart contract issuing the token
+    pub asset: EthAddress,
+    /// The address receiving assets on Ethereum
+    pub receiver: EthAddress,
+}
+
+/// Struct for whitelisting a token from Ethereum.
+/// Includes the address of issuing contract and
+/// a cap on the max amount of this token allowed to be
+/// held by the bridge.
+#[derive(
+    Clone,
+    Debug,
+    PartialEq,
+    Eq,
+    Hash,
+    PartialOrd,
+    Ord,
+    BorshSerialize,
+    BorshDeserialize,
+    BorshSchema,
+)]
+#[allow(dead_code)]
+pub struct TokenWhitelist {
+    /// Address of Ethereum smart contract issuing token
+    pub token: EthAddress,
+    /// Maximum amount of token allowed on the bridge
+    pub cap: Amount,
+}
+
+#[cfg(test)]
+pub mod tests {
+    use std::str::FromStr;
+
+    use super::*;
+
+    #[test]
+    fn test_eth_address_to_canonical() {
+        let canonical = testing::DAI_ERC20_ETH_ADDRESS.to_canonical();
+
+        assert_eq!(
+            testing::DAI_ERC20_ETH_ADDRESS_CHECKSUMMED.to_ascii_lowercase(),
+            canonical,
+        );
+    }
+
+    #[test]
+    fn test_eth_address_from_str() {
+        let addr =
+            EthAddress::from_str(testing::DAI_ERC20_ETH_ADDRESS_CHECKSUMMED)
+                .unwrap();
+
+        assert_eq!(testing::DAI_ERC20_ETH_ADDRESS, addr);
+    }
+
+    #[test]
+    fn test_eth_address_from_str_error() {
+        let result = EthAddress::from_str(
+            "arbitrary string which isn't an Ethereum address",
+        );
+
+        assert!(result.is_err());
+    }
+
+    /// Test that serde correctly serializes EthAddress types to/from lowercase
+    /// hex encodings
+    #[test]
+    fn test_eth_address_serde_roundtrip() {
+        let addr =
+            EthAddress::from_str(testing::DAI_ERC20_ETH_ADDRESS_CHECKSUMMED)
+                .unwrap();
+        let serialized = serde_json::to_string(&addr).expect("Test failed");
+        assert_eq!(
+            serialized,
+            format!(
+                r#""{}""#,
+                testing::DAI_ERC20_ETH_ADDRESS_CHECKSUMMED.to_lowercase()
+            )
+        );
+        let deserialized: EthAddress =
+            serde_json::from_str(&serialized).expect("Test failed");
+        assert_eq!(addr, deserialized);
+    }
+}
+
+#[allow(missing_docs)]
+/// Test helpers
+#[cfg(any(test, feature = "testing"))]
+pub mod testing {
+    use super::*;
+    use crate::types::token::{self, Amount};
+
+    pub const DAI_ERC20_ETH_ADDRESS_CHECKSUMMED: &str =
+        "0x6B175474E89094C44Da98b954EedeAC495271d0F";
+    pub const DAI_ERC20_ETH_ADDRESS: EthAddress = EthAddress([
+        107, 23, 84, 116, 232, 144, 148, 196, 77, 169, 139, 149, 78, 237, 234,
+        196, 149, 39, 29, 15,
+    ]);
+    pub const USDC_ERC20_ETH_ADDRESS_CHECKSUMMED: &str =
+        "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48";
+    pub const USDC_ERC20_ETH_ADDRESS: EthAddress = EthAddress([
+        160, 184, 105, 145, 198, 33, 139, 54, 193, 209, 157, 74, 46, 158, 176,
+        206, 54, 6, 235, 72,
+    ]);
+
+    pub fn arbitrary_eth_address() -> EthAddress {
+        DAI_ERC20_ETH_ADDRESS
+    }
+
+    pub fn arbitrary_nonce() -> Uint {
+        123.into()
+    }
+
+    pub fn arbitrary_keccak_hash() -> KeccakHash {
+        KeccakHash([0; 32])
+    }
+
+    pub fn arbitrary_amount() -> Amount {
+        Amount::from(1_000)
+    }
+
+    pub fn arbitrary_bonded_stake() -> token::Amount {
+        token::Amount::from(1_000)
+    }
+
+    /// A [`EthereumEvent::TransfersToNamada`] containing a single transfer of
+    /// some arbitrary ERC20
+    pub fn arbitrary_single_transfer(
+        nonce: Uint,
+        receiver: Address,
+    ) -> EthereumEvent {
+        EthereumEvent::TransfersToNamada {
+            nonce,
+            transfers: vec![TransferToNamada {
+                amount: arbitrary_amount(),
+                asset: arbitrary_eth_address(),
+                receiver,
+            }],
+        }
+    }
+}
diff --git a/core/src/types/hash.rs b/core/src/types/hash.rs
index 74bfe3dd45..01c38de5ee 100644
--- a/core/src/types/hash.rs
+++ b/core/src/types/hash.rs
@@ -4,8 +4,6 @@ use std::fmt::{self, Display};
 use std::ops::Deref;
 use std::str::FromStr;
 
-use arse_merkle_tree::traits::Value;
-use arse_merkle_tree::{Hash as TreeHash, H256};
 use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
 use data_encoding::HEXUPPER;
 use serde::{Deserialize, Serialize};
@@ -15,6 +13,9 @@ use thiserror::Error;
 /// The length of the transaction hash string
 pub const HASH_LENGTH: usize = 32;
 
+/// The length of the hex encoded transaction hash.
+pub const HEX_HASH_LENGTH: usize = HASH_LENGTH * 2;
+
 #[allow(missing_docs)]
 #[derive(Error, Debug)]
 pub enum Error {
@@ -97,7 +98,7 @@ impl TryFrom<&str> for Hash {
 
     fn try_from(string: &str) -> HashResult<Self> {
         let vec = HEXUPPER
-            .decode(string.as_ref())
+            .decode(string.to_uppercase().as_ref())
             .map_err(Error::FromStringError)?;
         Self::try_from(&vec[..])
    }
@@ -142,37 +143,22 @@ impl From<Hash> for crate::tendermint::Hash {
     }
 }
 
-impl From<Hash> for TreeHash {
-    fn from(hash: Hash) -> Self {
-        Self::from(hash.0)
-    }
-}
+#[cfg(test)]
+mod tests {
+    use proptest::prelude::*;
+    use proptest::string::{string_regex, RegexGeneratorStrategy};
 
-impl Value for Hash {
-    fn as_slice(&self) -> &[u8] {
-        self.0.as_slice()
-    }
+    use super::*;
 
-    fn zero() -> Self {
-        Hash([0u8; HASH_LENGTH])
+    /// Returns a proptest strategy that yields hex encoded hashes.
+    fn hex_encoded_hash_strat() -> RegexGeneratorStrategy<String> {
+        string_regex(r"[a-fA-F0-9]{64}").unwrap()
     }
-}
 
-impl From<Hash> for H256 {
-    fn from(hash: Hash) -> Self {
-        hash.0.into()
-    }
-}
-
-impl From<H256> for Hash {
-    fn from(hash: H256) -> Self {
-        Self(hash.into())
-    }
-}
-
-impl From<&H256> for Hash {
-    fn from(hash: &H256) -> Self {
-        let hash = hash.to_owned();
-        Self(hash.into())
+    proptest! {
+        #[test]
+        fn test_hash_string(hex_hash in hex_encoded_hash_strat()) {
+            let _: Hash = hex_hash.try_into().unwrap();
+        }
     }
 }
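One consequence of the `to_uppercase()` change above, sketched for illustration (not part of the changeset; the `namada_core` crate path is assumed): hex-encoded hashes now parse regardless of case, which the new proptest exercises with mixed-case input.

```rust
use std::convert::TryInto;

use namada_core::types::hash::Hash;

fn demo() {
    let upper: Hash =
        "1C8AFF950685C2ED4BC3174F3472287B56D9517B9C948127319A09A7A36DEAC8"
            .try_into()
            .unwrap();
    let lower: Hash =
        "1c8aff950685c2ed4bc3174f3472287b56d9517b9c948127319a09a7a36deac8"
            .try_into()
            .unwrap();
    // both spellings decode to the same 32 bytes
    assert_eq!(upper, lower);
}
```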
diff --git a/core/src/types/keccak.rs b/core/src/types/keccak.rs
new file mode 100644
index 0000000000..a2ee9c0d87
--- /dev/null
+++ b/core/src/types/keccak.rs
@@ -0,0 +1,103 @@
+//! This module is for hashing Namada types using the keccak256
+//! hash function in a way that is compatible with smart contracts
+//! on Ethereum.
+use std::convert::{TryFrom, TryInto};
+use std::fmt::Display;
+
+use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
+use data_encoding::HEXUPPER;
+use thiserror::Error;
+use tiny_keccak::{Hasher, Keccak};
+
+use crate::types::hash::{Hash, HASH_LENGTH};
+
+/// Errors for converting / parsing Keccak hashes
+#[allow(missing_docs)]
+#[derive(Error, Debug)]
+pub enum TryFromError {
+    #[error("Unexpected tx hash length {0}, expected {1}")]
+    WrongLength(usize, usize),
+    #[error("Failed trying to convert slice to a hash: {0}")]
+    ConversionFailed(std::array::TryFromSliceError),
+    #[error("Failed to convert string into a hash: {0}")]
+    FromStringError(data_encoding::DecodeError),
+}
+
+/// Represents a Keccak hash.
+#[derive(
+    Clone,
+    Debug,
+    Default,
+    PartialEq,
+    Eq,
+    Hash,
+    PartialOrd,
+    Ord,
+    BorshSerialize,
+    BorshDeserialize,
+    BorshSchema,
+)]
+pub struct KeccakHash(pub [u8; 32]);
+
+impl Display for KeccakHash {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        for byte in &self.0 {
+            write!(f, "{:02X}", byte)?;
+        }
+        Ok(())
+    }
+}
+impl From<KeccakHash> for Hash {
+    fn from(hash: KeccakHash) -> Self {
+        Hash(hash.0)
+    }
+}
+
+impl From<Hash> for KeccakHash {
+    fn from(hash: Hash) -> Self {
+        KeccakHash(hash.0)
+    }
+}
+
+impl TryFrom<&[u8]> for KeccakHash {
+    type Error = TryFromError;
+
+    fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
+        if value.len() != HASH_LENGTH {
+            return Err(TryFromError::WrongLength(value.len(), HASH_LENGTH));
+        }
+        let hash: [u8; HASH_LENGTH] =
+            TryFrom::try_from(value).map_err(TryFromError::ConversionFailed)?;
+        Ok(KeccakHash(hash))
+    }
+}
+
+impl TryFrom<String> for KeccakHash {
+    type Error = TryFromError;
+
+    fn try_from(string: String) -> Result<Self, Self::Error> {
+        string.as_str().try_into()
+    }
+}
+
+impl TryFrom<&str> for KeccakHash {
+    type Error = TryFromError;
+
+    fn try_from(string: &str) -> Result<Self, Self::Error> {
+        let bytes: Vec<u8> = HEXUPPER
+            .decode(string.as_bytes())
+            .map_err(TryFromError::FromStringError)?;
+        Self::try_from(bytes.as_slice())
+    }
+}
+
+/// Hash bytes using Keccak
+pub fn keccak_hash(bytes: &[u8]) -> KeccakHash {
+    let mut output = [0; 32];
+
+    let mut hasher = Keccak::v256();
+    hasher.update(bytes);
+    hasher.finalize(&mut output);
+
+    KeccakHash(output)
+}
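A usage sketch for the new `keccak_hash` helper (not part of the changeset; crate path assumed): the digest of `"hello"` matches the vector used by the `eth_abi` tests, and `Display` on `KeccakHash` renders uppercase hex.

```rust
use namada_core::types::keccak::keccak_hash;

fn demo() {
    let hash = keccak_hash(b"hello");
    assert_eq!(
        hash.to_string(),
        "1C8AFF950685C2ED4BC3174F3472287B56D9517B9C948127319A09A7A36DEAC8"
    );
}
```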
diff --git a/core/src/types/key/common.rs b/core/src/types/key/common.rs
index e928579367..633367053c 100644
--- a/core/src/types/key/common.rs
+++ b/core/src/types/key/common.rs
@@ -1,5 +1,6 @@
 //! Cryptographic keys
 
+use std::convert::TryFrom;
 use std::fmt::Display;
 use std::str::FromStr;
 
@@ -8,12 +9,14 @@ use data_encoding::HEXLOWER;
 #[cfg(feature = "rand")]
 use rand::{CryptoRng, RngCore};
 use serde::{Deserialize, Serialize};
+use thiserror::Error;
 
 use super::{
     ed25519, secp256k1, ParsePublicKeyError, ParseSecretKeyError,
     ParseSignatureError, RefTo, SchemeType, SigScheme as SigSchemeTrait,
     VerifySigError,
 };
+use crate::types::ethereum_events::EthAddress;
 
 /// Public key
 #[derive(
@@ -84,6 +87,24 @@ impl FromStr for PublicKey {
     }
 }
 
+#[allow(missing_docs)]
+#[derive(Error, Debug)]
+pub enum EthAddressConvError {
+    #[error("Eth key cannot be ed25519, only secp256k1")]
+    CannotBeEd25519,
+}
+
+impl TryFrom<&PublicKey> for EthAddress {
+    type Error = EthAddressConvError;
+
+    fn try_from(value: &PublicKey) -> Result<Self, Self::Error> {
+        match value {
+            PublicKey::Ed25519(_) => Err(EthAddressConvError::CannotBeEd25519),
+            PublicKey::Secp256k1(pk) => Ok(EthAddress::from(pk)),
+        }
+    }
+}
+
 /// Secret key
 #[derive(Debug, Clone, BorshSerialize, BorshDeserialize, BorshSchema)]
 #[allow(clippy::large_enum_variant)]
diff --git a/core/src/types/key/mod.rs b/core/src/types/key/mod.rs
index 3a500addb0..9b4efd6f21 100644
--- a/core/src/types/key/mod.rs
+++ b/core/src/types/key/mod.rs
@@ -423,6 +423,32 @@ pub mod testing {
             .unwrap()
     }
 
+    /// An Ethereum keypair for tests
+    pub fn keypair_3() -> <SigScheme as SigSchemeTrait>::SecretKey {
+        let bytes = [
+            0xf3, 0x78, 0x78, 0x80, 0xba, 0x85, 0x0b, 0xa4, 0xc5, 0x74, 0x50,
+            0x5a, 0x23, 0x54, 0x6d, 0x46, 0x74, 0xa1, 0x3f, 0x09, 0x75, 0x0c,
+            0xf4, 0xb5, 0xb8, 0x17, 0x69, 0x64, 0xf4, 0x08, 0xd4, 0x80,
+        ];
+        secp256k1::SecretKey::try_from_slice(bytes.as_ref())
+            .unwrap()
+            .try_to_sk()
+            .unwrap()
+    }
+
+    /// An Ethereum keypair for tests
+    pub fn keypair_4() -> <SigScheme as SigSchemeTrait>::SecretKey {
+        let bytes = [
+            0x68, 0xab, 0xce, 0x64, 0x54, 0x07, 0x7e, 0xf5, 0x1a, 0xb4, 0x31,
+            0x7a, 0xb8, 0x8b, 0x98, 0x30, 0x27, 0x11, 0x4e, 0x58, 0x69, 0xd6,
+            0x45, 0x94, 0xdc, 0x90, 0x8d, 0x94, 0xee, 0x58, 0x46, 0x91,
+        ];
+        secp256k1::SecretKey::try_from_slice(bytes.as_ref())
+            .unwrap()
+            .try_to_sk()
+            .unwrap()
+    }
+
     /// Generate an arbitrary [`super::SecretKey`].
     pub fn arb_keypair<S: SigScheme>() -> impl Strategy<Value = S::SecretKey> {
         any::<[u8; 32]>().prop_map(move |seed| {
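The conversions added below follow the standard Ethereum address derivation: keccak256 over the 64-byte uncompressed public key (tag byte dropped), keeping the last 20 bytes. A sketch of the fallible entry point from a `common::PublicKey` (not part of the changeset; crate path assumed):

```rust
use std::convert::TryFrom;

use namada_core::types::ethereum_events::EthAddress;
use namada_core::types::key::common;

fn to_eth_address(pk: &common::PublicKey) -> Option<EthAddress> {
    // succeeds for secp256k1 keys; ed25519 keys have no Ethereum address
    EthAddress::try_from(pk).ok()
}
```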
diff --git a/core/src/types/key/secp256k1.rs b/core/src/types/key/secp256k1.rs
index d901e46d25..ba622b9d7e 100644
--- a/core/src/types/key/secp256k1.rs
+++ b/core/src/types/key/secp256k1.rs
@@ -1,5 +1,6 @@
 //! secp256k1 keys and related functionality
 
+use std::cmp::Ordering;
 use std::fmt;
 use std::fmt::{Debug, Display};
 use std::hash::{Hash, Hasher};
@@ -8,6 +9,7 @@ use std::str::FromStr;
 
 use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
 use data_encoding::HEXLOWER;
+use ethabi::Token;
 use libsecp256k1::RecoveryId;
 #[cfg(feature = "rand")]
 use rand::{CryptoRng, RngCore};
@@ -19,6 +21,13 @@ use super::{
     ParsePublicKeyError, ParseSecretKeyError, ParseSignatureError, RefTo,
     SchemeType, SigScheme as SigSchemeTrait, VerifySigError,
 };
+use crate::types::eth_abi::Encode;
+use crate::types::ethereum_events::EthAddress;
+
+/// The length of an Ethereum-compatible signature on this curve: a
+/// traditional signature, plus the extra byte Ethereum includes to
+/// prevent malleability attacks.
+pub const SIGNATURE_LENGTH: usize = libsecp256k1::util::SIGNATURE_SIZE + 1;
 
 /// secp256k1 public key
 #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
@@ -137,6 +146,23 @@ impl From<libsecp256k1::PublicKey> for PublicKey {
     }
 }
 
+impl From<&PublicKey> for EthAddress {
+    fn from(pk: &PublicKey) -> Self {
+        use tiny_keccak::Hasher;
+
+        let mut hasher = tiny_keccak::Keccak::v256();
+        // We're removing the first byte with
+        // `libsecp256k1::util::TAG_PUBKEY_FULL`
+        let pk_bytes = &pk.0.serialize()[1..];
+        hasher.update(pk_bytes);
+        let mut output = [0_u8; 32];
+        hasher.finalize(&mut output);
+        let mut addr = [0; 20];
+        addr.copy_from_slice(&output[12..]);
+        EthAddress(addr)
+    }
+}
+
 /// Secp256k1 secret key
 #[derive(Debug, Clone)]
 pub struct SecretKey(pub Box<libsecp256k1::SecretKey>);
@@ -288,7 +314,7 @@ impl Serialize for Signature {
         // TODO: implement the line below, currently cannot support [u8; 64]
         // serde::Serialize::serialize(&arr, serializer)
 
-        let mut seq = serializer.serialize_tuple(arr.len())?;
+        let mut seq = serializer.serialize_tuple(arr.len() + 1)?;
         for elem in &arr[..] {
             seq.serialize_element(elem)?;
         }
@@ -305,12 +331,12 @@ impl<'de> Deserialize<'de> for Signature {
         struct ByteArrayVisitor;
 
         impl<'de> Visitor<'de> for ByteArrayVisitor {
-            type Value = [u8; libsecp256k1::util::SIGNATURE_SIZE + 1];
+            type Value = [u8; SIGNATURE_LENGTH];
 
             fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                 formatter.write_str(&format!(
                     "an array of length {}",
-                    libsecp256k1::util::SIGNATURE_SIZE
+                    SIGNATURE_LENGTH,
                 ))
             }
 
@@ -318,9 +344,9 @@ impl<'de> Deserialize<'de> for Signature {
             where
                 A: SeqAccess<'de>,
             {
-                let mut arr = [0u8; libsecp256k1::util::SIGNATURE_SIZE + 1];
+                let mut arr = [0u8; SIGNATURE_LENGTH];
                 #[allow(clippy::needless_range_loop)]
-                for i in 0..libsecp256k1::util::SIGNATURE_SIZE + 1 {
+                for i in 0..SIGNATURE_LENGTH {
                     arr[i] = seq
                         .next_element()?
+                        .ok_or_else(|| Error::invalid_length(i, &self))?;
                 }
                 Ok(arr)
             }
         }
 
-        let arr_res = deserializer.deserialize_tuple(
-            libsecp256k1::util::SIGNATURE_SIZE + 1,
-            ByteArrayVisitor,
-        )?;
+        let arr_res = deserializer
+            .deserialize_tuple(SIGNATURE_LENGTH, ByteArrayVisitor)?;
         let sig_array: [u8; 64] = arr_res[..64].try_into().unwrap();
         let sig = libsecp256k1::Signature::parse_standard(&sig_array)
             .map_err(D::Error::custom);
@@ -401,6 +425,16 @@ impl BorshSchema for Signature {
     }
 }
 
+impl Encode<1> for Signature {
+    fn tokenize(&self) -> [Token; 1] {
+        let sig_serialized = libsecp256k1::Signature::serialize(&self.0);
+        let r = Token::FixedBytes(sig_serialized[..32].to_vec());
+        let s = Token::FixedBytes(sig_serialized[32..].to_vec());
+        let v = Token::FixedBytes(vec![self.1.serialize()]);
+        [Token::Tuple(vec![r, s, v])]
+    }
+}
+
 #[allow(clippy::derive_hash_xor_eq)]
 impl Hash for Signature {
     fn hash<H: Hasher>(&self, state: &mut H) {
@@ -410,7 +444,18 @@ impl PartialOrd for Signature {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        self.0.serialize().partial_cmp(&other.0.serialize())
+        match self.0.serialize().partial_cmp(&other.0.serialize()) {
+            Some(Ordering::Equal) => {
+                self.1.serialize().partial_cmp(&other.1.serialize())
+            }
+            res => res,
+        }
+    }
+}
+
+impl Ord for Signature {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.partial_cmp(other).unwrap()
     }
 }
@@ -551,3 +596,62 @@ impl super::SigScheme for SigScheme {
         }
     }
 }
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    /// test vector from https://bitcoin.stackexchange.com/a/89848
+    const SECRET_KEY_HEX: &str =
+        "c2c72dfbff11dfb4e9d5b0a20c620c58b15bb7552753601f043db91331b0db15";
+
+    /// Test that we can recover an Ethereum address from
+    /// a public secp key.
+    #[test]
+    fn test_eth_address_from_secp() {
+        let expected_pk_hex = "a225bf565ff4ea039bccba3e26456e910cd74e4616d67ee0a166e26da6e5e55a08d0fa1659b4b547ba7139ca531f62907b9c2e72b80712f1c81ece43c33f4b8b";
+        let expected_eth_addr_hex = "6ea27154616a29708dce7650b475dd6b82eba6a3";
+
+        let sk_bytes = HEXLOWER.decode(SECRET_KEY_HEX.as_bytes()).unwrap();
+        let sk = SecretKey::try_from_slice(&sk_bytes[..]).unwrap();
+        let pk: PublicKey = sk.ref_to();
+        // We're removing the first byte with
+        // `libsecp256k1::util::TAG_PUBKEY_FULL`
+        let pk_hex = HEXLOWER.encode(&pk.0.serialize()[1..]);
+        assert_eq!(expected_pk_hex, pk_hex);
+
+        let eth_addr: EthAddress = (&pk).into();
+        let eth_addr_hex = HEXLOWER.encode(&eth_addr.0[..]);
+        assert_eq!(expected_eth_addr_hex, eth_addr_hex);
+    }
+
+    /// Test that serializing and then deserializing a signature
+    /// with Serde returns the same signature.
+    #[test]
+    fn test_roundtrip_serde() {
+        let sk_bytes = HEXLOWER.decode(SECRET_KEY_HEX.as_bytes()).unwrap();
+        let sk = SecretKey::try_from_slice(&sk_bytes[..]).unwrap();
+        let to_sign = "test".as_bytes();
+        let mut signature = SigScheme::sign(&sk, to_sign);
+        signature.1 = RecoveryId::parse(3).expect("Test failed");
+        let sig_json = serde_json::to_string(&signature).expect("Test failed");
+        let sig: Signature =
+            serde_json::from_str(&sig_json).expect("Test failed");
+        assert_eq!(sig, signature)
+    }
+
+    /// Test that serializing and then deserializing a signature
+    /// with Borsh returns the same signature.
+    #[test]
+    fn test_roundtrip_borsh() {
+        let sk_bytes = HEXLOWER.decode(SECRET_KEY_HEX.as_bytes()).unwrap();
+        let sk = SecretKey::try_from_slice(&sk_bytes[..]).unwrap();
+        let to_sign = "test".as_bytes();
+        let mut signature = SigScheme::sign(&sk, to_sign);
+        signature.1 = RecoveryId::parse(3).expect("Test failed");
+        let sig_bytes = signature.try_to_vec().expect("Test failed");
+        let sig = Signature::try_from_slice(sig_bytes.as_slice())
+            .expect("Test failed");
+        assert_eq!(sig, signature);
+    }
+}
diff --git a/core/src/types/mod.rs b/core/src/types/mod.rs
index 0550060498..ccae2b8c31 100644
--- a/core/src/types/mod.rs
+++ b/core/src/types/mod.rs
@@ -2,10 +2,14 @@
 
 pub mod address;
 pub mod chain;
+pub mod eth_abi;
+pub mod eth_bridge_pool;
+pub mod ethereum_events;
 pub mod governance;
 pub mod hash;
 pub mod ibc;
 pub mod internal;
+pub mod keccak;
 pub mod key;
 pub mod masp;
 pub mod storage;
@@ -13,3 +17,5 @@ pub mod time;
 pub mod token;
 pub mod transaction;
 pub mod validity_predicate;
+pub mod vote_extensions;
+pub mod voting_power;
diff --git a/core/src/types/storage.rs b/core/src/types/storage.rs
index b27c1e250c..7a6d8d0e99 100644
--- a/core/src/types/storage.rs
+++ b/core/src/types/storage.rs
@@ -6,17 +6,19 @@ use std::num::ParseIntError;
 use std::ops::{Add, Deref, Div, Mul, Rem, Sub};
 use std::str::FromStr;
 
-use arse_merkle_tree::traits::Value;
-use arse_merkle_tree::{InternalKey, Key as TreeKey};
+use arse_merkle_tree::InternalKey;
 use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
 use data_encoding::BASE32HEX_NOPAD;
+use ics23::CommitmentProof;
 use index_set::vec::VecIndexSet;
 use serde::{Deserialize, Serialize};
 use thiserror::Error;
 
 use crate::bytes::ByteBuf;
+use crate::ledger::eth_bridge::storage::bridge_pool::BridgePoolProof;
 use crate::types::address::{self, Address};
 use crate::types::hash::Hash;
+use crate::types::keccak::{KeccakHash, TryFromError};
 use crate::types::time::DateTimeUtc;
 
 /// The maximum size of an IBC key (in bytes) allowed in merkle-ized storage
@@ -25,18 +27,20 @@ pub const IBC_KEY_LIMIT: usize = 120;
 #[allow(missing_docs)]
 #[derive(Error, Debug)]
 pub enum Error {
-    #[error("TEMPORARY error: {error}")]
-    Temporary { error: String },
     #[error("Error parsing address: {0}")]
     ParseAddress(address::DecodeError),
     #[error("Error parsing address from a storage key")]
     ParseAddressFromKey,
     #[error("Reserved prefix or string is specified: {0}")]
     InvalidKeySeg(String),
-    #[error("Error parsing key segment {0}")]
+    #[error("Error parsing key segment: {0}")]
     ParseKeySeg(String),
-    #[error("Could not parse string: '{0}' into requested type: {1}")]
-    ParseError(String, String),
+    #[error("Error parsing block hash: {0}")]
+    ParseBlockHash(String),
+    #[error("The key is empty")]
+    EmptyKey,
+    #[error("The key is missing sub-key segments: {0}")]
+    MissingSegments(String),
 }
 
 /// Result for functions that may fail
@@ -226,13 +230,11 @@ impl TryFrom<&[u8]> for BlockHash {
 
     fn try_from(value: &[u8]) -> Result<Self> {
         if value.len() != BLOCK_HASH_LENGTH {
-            return Err(Error::Temporary {
-                error: format!(
-                    "Unexpected block hash length {}, expected {}",
-                    value.len(),
-                    BLOCK_HASH_LENGTH
-                ),
-            });
+            return Err(Error::ParseBlockHash(format!(
+                "Unexpected block hash length {}, expected {}",
+                value.len(),
+                BLOCK_HASH_LENGTH
+            )));
         }
         let mut hash = [0; 32];
         hash.copy_from_slice(value);
@@ -244,18 +246,7 @@ impl TryFrom<Vec<u8>> for BlockHash {
     type Error = self::Error;
 
     fn try_from(value: Vec<u8>) -> Result<Self> {
-        if value.len() != BLOCK_HASH_LENGTH {
-            return Err(Error::Temporary {
-                error: format!(
-                    "Unexpected block hash length {}, expected {}",
-                    value.len(),
-                    BLOCK_HASH_LENGTH
-                ),
-            });
-        }
-        let mut hash = [0; 32];
-        hash.copy_from_slice(&value);
-        Ok(BlockHash(hash))
+        value.as_slice().try_into()
     }
 }
 
@@ -349,35 +340,6 @@ pub enum TreeKeyError {
     InvalidMerkleKey(String),
 }
 
-impl TreeKey<IBC_KEY_LIMIT> for StringKey {
-    type Error = TreeKeyError;
-
-    fn as_slice(&self) -> &[u8] {
-        &self.original.as_slice()[..self.length]
-    }
-
-    fn try_from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Error> {
-        let mut tree_key = [0u8; IBC_KEY_LIMIT];
-        let mut original = [0u8; IBC_KEY_LIMIT];
-        let mut length = 0;
-        for (i, byte) in bytes.iter().enumerate() {
-            if i >= IBC_KEY_LIMIT {
-                return Err(TreeKeyError::InvalidMerkleKey(
-                    "Input IBC key is too large".into(),
-                ));
-            }
-            original[i] = *byte;
-            tree_key[i] = byte.wrapping_add(1);
-            length += 1;
-        }
-        Ok(Self {
-            original,
-            tree_key: tree_key.into(),
-            length,
-        })
-    }
-}
-
 impl Deref for StringKey {
     type Target = InternalKey<IBC_KEY_LIMIT>;
 
@@ -445,13 +407,23 @@ impl From<TreeBytes> for Vec<u8> {
     }
 }
 
-impl Value for TreeBytes {
-    fn as_slice(&self) -> &[u8] {
-        self.0.as_slice()
+/// Type of membership proof from a merkle tree
+pub enum MembershipProof {
+    /// ICS23 compliant membership proof
+    ICS23(CommitmentProof),
+    /// Bespoke membership proof for the Ethereum bridge pool
+    BridgePool(BridgePoolProof),
+}
+
+impl From<CommitmentProof> for MembershipProof {
+    fn from(proof: CommitmentProof) -> Self {
+        Self::ICS23(proof)
     }
+}
 
-    fn zero() -> Self {
-        TreeBytes::zero()
+impl From<BridgePoolProof> for MembershipProof {
+    fn from(proof: BridgePoolProof) -> Self {
+        Self::BridgePool(proof)
     }
 }
 
@@ -582,21 +554,14 @@ impl Key {
         match self.segments.split_first() {
             Some((_, rest)) => {
                 if rest.is_empty() {
-                    Err(Error::Temporary {
-                        error: format!(
-                            "The key doesn't have the sub segments: {}",
-                            self
-                        ),
-                    })
+                    Err(Error::MissingSegments(format!("{self}")))
                 } else {
                     Ok(Self {
                        segments: rest.to_vec(),
                    })
                 }
             }
-            None => Err(Error::Temporary {
-                error: "The key is empty".to_owned(),
-            }),
+            None => Err(Error::EmptyKey),
         }
     }
 
@@ -753,6 +718,36 @@ impl KeySeg for BlockHeight {
     }
 }
 
+impl KeySeg for Epoch {
+    fn parse(string: String) -> Result<Self> {
+        string
+            .split_once('=')
+            .and_then(|(prefix, epoch)| (prefix == "E").then_some(epoch))
+            .ok_or_else(|| {
+                Error::ParseKeySeg(format!(
+                    "Invalid epoch prefix on key: {string}"
+                ))
+            })
+            .and_then(|epoch| {
+                epoch.parse::<u64>().map_err(|e| {
+                    Error::ParseKeySeg(format!(
+                        "Unexpected epoch value {epoch}, {e}"
+                    ))
+                })
+            })
+            .map(Epoch)
+    }
+
+    fn raw(&self) -> String {
+        let &Epoch(epoch) = self;
+        format!("E={epoch}")
+    }
+
+    fn to_db_key(&self) -> DbKeySeg {
+        DbKeySeg::StringSeg(self.raw())
+    }
+}
+
 impl KeySeg for Address {
     fn parse(mut seg: String) -> Result<Self> {
         match seg.chars().next() {
@@ -773,6 +768,37 @@ impl KeySeg for Address {
     }
 }
 
+impl KeySeg for Hash {
+    fn parse(seg: String) -> Result<Self> {
+        seg.try_into().map_err(|e: crate::types::hash::Error| {
+            Error::ParseKeySeg(e.to_string())
+        })
+    }
+
+    fn raw(&self) -> String {
+        self.to_string()
+    }
+
+    fn to_db_key(&self) -> DbKeySeg {
+        DbKeySeg::StringSeg(self.raw())
+    }
+}
+
+impl KeySeg for KeccakHash {
+    fn parse(seg: String) -> Result<Self> {
+        seg.try_into()
+            .map_err(|e: TryFromError| Error::ParseKeySeg(e.to_string()))
+    }
+
+    fn raw(&self) -> String {
+        self.to_string()
+    }
+
+    fn to_db_key(&self) -> DbKeySeg {
+        DbKeySeg::StringSeg(self.raw())
+    }
+}
+
 /// Implement [`KeySeg`] for a type via base32hex of its BE bytes (using
 /// `to_le_bytes()` and `from_le_bytes` methods) that maintains sort order of
 /// the original data.
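A sketch of the epoch key-segment encoding introduced above (not part of the changeset; crate path assumed): `raw` renders `Epoch(7)` as `"E=7"` and `parse` inverts it, which the new proptest checks over the whole `u64` range.

```rust
use namada_core::types::storage::{Epoch, KeySeg};

fn demo() {
    let seg = Epoch(7).raw();
    assert_eq!(seg, "E=7");
    let parsed: Epoch = KeySeg::parse(seg).unwrap();
    assert_eq!(parsed, Epoch(7));
}
```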
@@ -1079,6 +1105,13 @@ impl Epochs {
         }
         None
     }
+
+    /// Return all starting block heights for each successive Epoch.
+    ///
+    /// __INVARIANT:__ The returned values are sorted in ascending order.
+    pub fn first_block_heights(&self) -> &[BlockHeight] {
+        &self.first_block_heights
+    }
 }
 
 /// A value of a storage prefix iterator.
@@ -1116,6 +1149,19 @@ mod tests {
             let key = Key::from(addr.to_db_key()).push(&s).expect("cannnot push the segment");
             assert_eq!(key.segments[1].raw(), s);
         }
+
+        /// Test roundtrip parsing of key segments derived from [`Epoch`]
+        /// values.
+        #[test]
+        fn test_parse_epoch_key_segment(e in 0..=u64::MAX) {
+            let original_epoch = Epoch(e);
+            let key_seg = match original_epoch.to_db_key() {
+                DbKeySeg::StringSeg(s) => s,
+                _ => panic!("Test failed"),
+            };
+            let parsed_epoch: Epoch = KeySeg::parse(key_seg).expect("Test failed");
+            assert_eq!(original_epoch, parsed_epoch);
+        }
     }
 
     #[test]
diff --git a/core/src/types/token.rs b/core/src/types/token.rs
index 2bf95735ed..1aa1ad7734 100644
--- a/core/src/types/token.rs
+++ b/core/src/types/token.rs
@@ -81,6 +81,13 @@ impl Amount {
             micro: change as u64,
         }
     }
+
+    /// Checked addition on amounts
+    pub fn checked_add(&self, amount: &Amount) -> Option<Self> {
+        self.micro
+            .checked_add(amount.micro)
+            .map(|micro| Self { micro })
+    }
 }
 
 impl serde::Serialize for Amount {
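A sketch of the new `checked_add` (not part of the changeset; crate path assumed): unlike `+`, overflow is reported as `None` instead of panicking, so callers can propagate it as an error.

```rust
use namada_core::types::token::Amount;

fn demo() {
    let max = Amount::from(u64::MAX);
    // overflow is surfaced as `None` rather than a panic
    assert!(max.checked_add(&Amount::from(1)).is_none());
    assert!(Amount::from(1).checked_add(&Amount::from(2)).is_some());
}
```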
diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs
index 69b212f4c8..16ccc1b285 100644
--- a/core/src/types/transaction/mod.rs
+++ b/core/src/types/transaction/mod.rs
@@ -185,6 +185,11 @@ pub struct InitValidator {
     pub account_key: common::PublicKey,
     /// A key to be used for signing blocks and votes on blocks.
     pub consensus_key: common::PublicKey,
+    /// An Eth bridge governance public key
+    pub eth_cold_key: secp256k1::PublicKey,
+    /// An Eth bridge hot signing public key used for validator set updates and
+    /// cross-chain transactions
+    pub eth_hot_key: secp256k1::PublicKey,
     /// Public key used to sign protocol transactions
     pub protocol_key: common::PublicKey,
     /// Serialization of the public session key used in the DKG
diff --git a/core/src/types/transaction/protocol.rs b/core/src/types/transaction/protocol.rs
index becc17941f..2475dcca72 100644
--- a/core/src/types/transaction/protocol.rs
+++ b/core/src/types/transaction/protocol.rs
@@ -35,6 +35,9 @@ mod protocol_txs {
     use crate::proto::Tx;
     use crate::types::key::*;
     use crate::types::transaction::{EllipticCurve, TxError, TxType};
+    use crate::types::vote_extensions::{
+        ethereum_events, validator_set_update,
+    };
 
     const TX_NEW_DKG_KP_WASM: &str = "tx_update_dkg_session_keypair.wasm";
 
@@ -76,27 +79,27 @@ mod protocol_txs {
         DKG(DkgMessage),
         /// Tx requesting a new DKG session keypair
         NewDkgKeypair(Tx),
-        /// Aggregation of Ethereum state changes
-        /// voted on by validators in last block
-        EthereumStateUpdate(Tx),
+        /// Ethereum events contained in vote extensions that
+        /// are compressed before being included on chain
+        EthereumEvents(ethereum_events::VextDigest),
+        /// Validator set updates contained in vote extensions
+        ValidatorSetUpdate(validator_set_update::VextDigest),
+        /// Ethereum events seen by some validator
+        EthEventsVext(ethereum_events::SignedVext),
+        /// Validator set update signed by some validator
+        ValSetUpdateVext(validator_set_update::SignedVext),
     }
 
     impl ProtocolTxType {
         /// Sign a ProtocolTxType and wrap it up in a normal Tx
-        pub fn sign(
-            self,
-            pk: &common::PublicKey,
-            signing_key: &common::SecretKey,
-        ) -> Tx {
+        pub fn sign(self, signing_key: &common::SecretKey) -> Tx {
+            let pk = signing_key.ref_to();
             Tx::new(
                 vec![],
                 Some(
-                    TxType::Protocol(ProtocolTx {
-                        pk: pk.clone(),
-                        tx: self,
-                    })
-                    .try_to_vec()
-                    .expect("Could not serialize ProtocolTx"),
+                    TxType::Protocol(ProtocolTx { pk, tx: self })
+                        .try_to_vec()
+                        .expect("Could not serialize ProtocolTx"),
                 ),
             )
             .sign(signing_key)
diff --git a/core/src/types/vote_extensions.rs b/core/src/types/vote_extensions.rs
new file mode 100644
index 0000000000..2a87ceea5c
--- /dev/null
+++ b/core/src/types/vote_extensions.rs
@@ -0,0 +1,38 @@
+//! This module contains types necessary for processing vote extensions.
+
+pub mod ethereum_events;
+pub mod validator_set_update;
+
+use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
+
+use crate::proto::Signed;
+
+/// This type represents the data we pass to the extension of
+/// a vote at the PreCommit phase of Tendermint.
+#[derive(
+    Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, BorshSchema,
+)]
+pub struct VoteExtension {
+    /// Vote extension data related with Ethereum events.
+    pub ethereum_events: Signed<ethereum_events::Vext>,
+    /// Vote extension data related with validator set updates.
+    pub validator_set_update: Option<validator_set_update::SignedVext>,
+}
+
+/// The digest of the signatures from different validators
+/// in [`VoteExtension`] instances.
+///
+/// From a [`VoteExtensionDigest`] we yield two signed
+/// [`crate::types::transaction::protocol::ProtocolTxType`] transactions:
+/// - A `ProtocolTxType::EthereumEvents` tx, and
+/// - A `ProtocolTxType::ValidatorSetUpdate` tx
+#[derive(
+    Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, BorshSchema,
+)]
+#[cfg(feature = "abcipp")]
+pub struct VoteExtensionDigest {
+    /// The digest of Ethereum events vote extension signatures.
+    pub ethereum_events: ethereum_events::VextDigest,
+    /// The digest of validator set updates vote extension signatures.
+    pub validator_set_update: Option<validator_set_update::VextDigest>,
+}
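A sketch of assembling the `VoteExtension` defined above at PreCommit time (not part of the changeset; crate path assumed, and the key is a placeholder): the Ethereum events half is always present, while a validator set update is only attached when one is due.

```rust
use namada_core::types::key::common;
use namada_core::types::vote_extensions::{ethereum_events, VoteExtension};

fn build_vote_extension(
    events_ext: ethereum_events::Vext,
    protocol_sk: &common::SecretKey,
) -> VoteExtension {
    VoteExtension {
        // sign the events with the validator's Namada protocol key
        ethereum_events: events_ext.sign(protocol_sk),
        // no validator set update in this sketch
        validator_set_update: None,
    }
}
```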
diff --git a/core/src/types/vote_extensions/ethereum_events.rs b/core/src/types/vote_extensions/ethereum_events.rs
new file mode 100644
index 0000000000..a83df45136
--- /dev/null
+++ b/core/src/types/vote_extensions/ethereum_events.rs
@@ -0,0 +1,298 @@
+//! Contains types necessary for processing Ethereum events
+//! in vote extensions.
+
+use std::collections::{BTreeSet, HashMap};
+
+use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
+
+use crate::proto::Signed;
+use crate::types::address::Address;
+use crate::types::ethereum_events::EthereumEvent;
+use crate::types::key::common::{self, Signature};
+use crate::types::storage::BlockHeight;
+
+/// Type alias for an [`EthereumEventsVext`].
+pub type Vext = EthereumEventsVext;
+
+/// Represents a [`Vext`] signed by some validator, with
+/// a Namada protocol key.
+pub type SignedVext = Signed<Vext>;
+
+/// Represents a set of [`EthereumEvent`] instances
+/// seen by some validator.
+///
+/// This struct will be created and signed over by each
+/// active validator, to be included as a vote extension at the end of a
+/// Tendermint PreCommit phase.
+#[derive(
+    Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, BorshSchema,
+)]
+pub struct EthereumEventsVext {
+    /// The block height for which this [`Vext`] was made.
+    pub block_height: BlockHeight,
+    /// TODO: the validator's address is temporarily being included
+    /// until we're able to map a Tendermint address to a validator
+    /// address (see )
+    pub validator_addr: Address,
+    /// The new ethereum events seen. These should be
+    /// deterministically ordered.
+    pub ethereum_events: Vec<EthereumEvent>,
+}
+
+impl Vext {
+    /// Creates a [`Vext`] without any Ethereum events.
+    pub fn empty(block_height: BlockHeight, validator_addr: Address) -> Self {
+        Self {
+            block_height,
+            ethereum_events: Vec::new(),
+            validator_addr,
+        }
+    }
+
+    /// Sign a [`Vext`] with a validator's `signing_key`,
+    /// and return the signed data.
+    pub fn sign(self, signing_key: &common::SecretKey) -> Signed<Self> {
+        Signed::new(signing_key, self)
+    }
+}
+
+/// Aggregates an Ethereum event with the corresponding
+/// validators who saw this event.
+#[derive(
+    Clone, Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize, BorshSchema,
+)]
+pub struct MultiSignedEthEvent {
+    /// The Ethereum event that was signed.
+    pub event: EthereumEvent,
+    /// List of addresses of validators who signed this event
+    /// and block height at which they signed it
+    pub signers: BTreeSet<(Address, BlockHeight)>,
+}
+
+/// Type alias for an [`EthereumEventsVextDigest`].
+pub type VextDigest = EthereumEventsVextDigest;
+
+/// Compresses a set of signed [`Vext`] instances, to save
+/// space on a block.
+#[derive(
+    Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, BorshSchema,
+)]
+pub struct EthereumEventsVextDigest {
+    /// The signatures, signing address, and signing block height
+    /// of each [`Vext`]
+    pub signatures: HashMap<(Address, BlockHeight), Signature>,
+    /// The events that were reported
+    pub events: Vec<MultiSignedEthEvent>,
+}
+
+impl VextDigest {
+    /// Build a singleton [`VextDigest`], from the provided [`Vext`].
+    #[inline]
+    pub fn singleton(ext: Signed<Vext>) -> VextDigest {
+        VextDigest {
+            signatures: HashMap::from([(
+                (ext.data.validator_addr.clone(), ext.data.block_height),
+                ext.sig,
+            )]),
+            events: ext
+                .data
+                .ethereum_events
+                .into_iter()
+                .map(|event| MultiSignedEthEvent {
+                    event,
+                    signers: BTreeSet::from([(
+                        ext.data.validator_addr.clone(),
+                        ext.data.block_height,
+                    )]),
+                })
+                .collect(),
+        }
+    }
+
+    /// Decompresses a set of signed [`Vext`] instances.
+    pub fn decompress(self, last_height: BlockHeight) -> Vec<Signed<Vext>> {
+        {
+            #[allow(clippy::drop_copy)]
+            drop(last_height);
+        }
+
+        let VextDigest { signatures, events } = self;
+
+        let mut extensions = vec![];
+
+        for (validator, sig) in signatures.into_iter() {
+            let mut ext = Vext::empty(validator.1, validator.0.clone());
+
+            for event in events.iter() {
+                if event.signers.contains(&validator) {
+                    ext.ethereum_events.push(event.event.clone());
+                }
+            }
+
+            // TODO: we probably need a manual `Ord` impl for
+            // `EthereumEvent`, such that this `sort()` is
+            // always deterministic, regardless
+            // of crate versions changing and such
+            ext.ethereum_events.sort();
+
+            let signed = Signed::new_from(ext, sig);
+            extensions.push(signed);
+        }
+        extensions
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::proto::Signed;
+    use crate::types::address::{self, Address};
+    use crate::types::ethereum_events::{EthereumEvent, Uint};
+    use crate::types::hash::Hash;
+    use crate::types::key;
+    use crate::types::key::RefTo;
+    #[cfg(feature = "abcipp")]
+    use crate::types::storage::BlockHeight;
+
+    /// Test the hashing of an Ethereum event
+    #[test]
+    fn test_ethereum_event_hash() {
+        let nonce = Uint::from(123u64);
+        let event = EthereumEvent::TransfersToNamada {
+            nonce,
+            transfers: vec![],
+        };
+        let hash = event.hash().unwrap();
+
+        assert_eq!(
+            hash,
+            Hash([
+                94, 131, 116, 129, 41, 204, 178, 144, 24, 8, 185, 16, 103, 236,
+                209, 191, 20, 89, 145, 17, 41, 233, 31, 98, 185, 6, 217, 204,
+                80, 38, 224, 23
+            ])
+        );
+    }
+
+    /// Test decompression of a set of Ethereum events
+    #[test]
+    fn test_decompress_ethereum_events() {
+        // we need to construct a `Vec<Signed<Vext>>`
+        let sk_1 = key::testing::keypair_1();
+        let sk_2 = key::testing::keypair_2();
+
+        let last_block_height = BlockHeight(123);
+
+        let ev_1 = EthereumEvent::TransfersToNamada {
+            nonce: 1u64.into(),
+            transfers: vec![],
+        };
+        let ev_2 = EthereumEvent::TransfersToEthereum {
+            nonce: 2u64.into(),
+            transfers: vec![],
+        };
+
+        let validator_1 = address::testing::established_address_1();
+        let validator_2 = address::testing::established_address_2();
+
+        let ext = |validator: Address| -> Vext {
+            let mut ext = Vext::empty(last_block_height, validator);
+
+            ext.ethereum_events.push(ev_1.clone());
+            ext.ethereum_events.push(ev_2.clone());
+            ext.ethereum_events.sort();
+
+            ext
+        };
+
+        // assume both v1 and v2 saw the same events,
+        // so each of them signs `ext` with their respective sk
+        let ext_1 = Signed::new(&sk_1, ext(validator_1.clone()));
+        let ext_2 = Signed::new(&sk_2, ext(validator_2.clone()));
+        #[cfg(not(feature = "abcipp"))]
+        let ext_3 = Signed::new(&sk_1, {
+            let mut ext = Vext::empty(
+                BlockHeight(last_block_height.0 - 1),
+                validator_1.clone(),
+            );
+            ext.ethereum_events.push(ev_1.clone());
+            ext.ethereum_events.push(ev_2.clone());
+            ext.ethereum_events.sort();
+            ext
+        });
+
+        #[cfg(feature = "abcipp")]
+        let ext = vec![ext_1, ext_2];
+        #[cfg(not(feature = "abcipp"))]
+        let ext = vec![ext_1, ext_2, ext_3];
+
+        // we have the `Signed` instances we need,
+        // let us now compress them into a single `VextDigest`
+        #[cfg(feature = "abcipp")]
+        let signatures: HashMap<_, _> = [
+            ((validator_1.clone(), last_block_height), ext[0].sig.clone()),
+            ((validator_2.clone(), last_block_height), ext[1].sig.clone()),
+        ]
+        .into_iter()
+        .collect();
+        #[cfg(not(feature = "abcipp"))]
+        let signatures: HashMap<_, _> = [
+            ((validator_1.clone(), last_block_height), ext[0].sig.clone()),
+            ((validator_2.clone(), last_block_height), ext[1].sig.clone()),
+            (
+                (validator_1.clone(), BlockHeight(last_block_height.0 - 1)),
+                ext[2].sig.clone(),
+            ),
+        ]
+        .into_iter()
+        .collect();
+
+        #[cfg(feature = "abcipp")]
+        let signers = {
+            let mut s = BTreeSet::new();
+            s.insert((validator_1.clone(), last_block_height));
+            s.insert((validator_2, last_block_height));
+            s
+        };
+        #[cfg(not(feature = "abcipp"))]
+        let signers = {
+            let mut s = BTreeSet::new();
+            s.insert((validator_1.clone(), last_block_height));
+            s.insert((
+                validator_1.clone(),
+                BlockHeight(last_block_height.0 - 1),
+            ));
+            s.insert((validator_2, last_block_height));
+            s
+        };
+
+        let events = vec![
+            MultiSignedEthEvent {
+                event: ev_1,
+                signers: signers.clone(),
+            },
+            MultiSignedEthEvent {
+                event: ev_2,
+                signers,
+            },
+        ];
+
+        let digest = VextDigest { events, signatures };
+
+        // finally, decompress the `VextDigest` back into a
+        // `Vec<Signed<Vext>>`
+        let decompressed = digest
+            .decompress(last_block_height)
+            .into_iter()
+            .collect::<Vec<Signed<Vext>>>();
+
+        assert_eq!(decompressed.len(), ext.len());
+        for vext in decompressed.into_iter() {
+            assert!(ext.contains(&vext));
+            if vext.data.validator_addr == validator_1 {
+                assert!(vext.verify(&sk_1.ref_to()).is_ok())
+            } else {
+                assert!(vext.verify(&sk_2.ref_to()).is_ok())
+            }
+        }
+    }
+}
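For a single extension, compression is lossless; a sketch of the `singleton`/`decompress` round trip (not part of the changeset; crate path assumed), mirroring the test above:

```rust
use namada_core::proto::Signed;
use namada_core::types::vote_extensions::ethereum_events::{Vext, VextDigest};

fn roundtrips(signed: Signed<Vext>) -> bool {
    let height = signed.data.block_height;
    let digest = VextDigest::singleton(signed.clone());
    digest.decompress(height) == vec![signed]
}
```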
diff --git a/core/src/types/vote_extensions/validator_set_update.rs b/core/src/types/vote_extensions/validator_set_update.rs
new file mode 100644
index 0000000000..1652896f13
--- /dev/null
+++ b/core/src/types/vote_extensions/validator_set_update.rs
@@ -0,0 +1,483 @@
+//! Contains types necessary for processing validator set updates
+//! in vote extensions.
+use std::cmp::Ordering;
+use std::collections::HashMap;
+
+use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
+use ethabi::ethereum_types as ethereum;
+use num_rational::Ratio;
+
+use crate::proto::Signed;
+use crate::types::address::Address;
+use crate::types::eth_abi::{AbiEncode, Encode, Token};
+use crate::types::ethereum_events::{EthAddress, Uint};
+use crate::types::keccak::KeccakHash;
+use crate::types::key::common::{self, Signature};
+use crate::types::storage::BlockHeight;
+#[allow(unused_imports)]
+use crate::types::storage::Epoch;
+use crate::types::token;
+
+// the namespace strings plugged into validator set hashes
+const BRIDGE_CONTRACT_NAMESPACE: &str = "bridge";
+const GOVERNANCE_CONTRACT_NAMESPACE: &str = "governance";
+
+/// Type alias for a [`ValidatorSetUpdateVextDigest`].
+pub type VextDigest = ValidatorSetUpdateVextDigest;
+
+/// Contains the digest of all signatures from a quorum of
+/// validators for a [`Vext`].
+#[derive(
+    Clone, Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize, BorshSchema,
+)]
+pub struct ValidatorSetUpdateVextDigest {
+    /// A mapping from a validator address to a [`Signature`].
+    ///
+    /// The key includes the block height at which a validator
+    /// set was signed by a given validator.
+    pub signatures: HashMap<(Address, BlockHeight), Signature>,
+    /// The addresses of the validators in the new [`Epoch`],
+    /// and their respective voting power.
+    pub voting_powers: VotingPowersMap,
+}
+
+impl VextDigest {
+    /// Build a singleton [`VextDigest`], from the provided [`Vext`].
+    #[inline]
+    pub fn singleton(ext: SignedVext) -> VextDigest {
+        VextDigest {
+            signatures: HashMap::from([(
+                (ext.data.validator_addr.clone(), ext.data.block_height),
+                ext.sig,
+            )]),
+            voting_powers: ext.data.voting_powers,
+        }
+    }
+
+    /// Decompresses a set of signed [`Vext`] instances.
+    pub fn decompress(self, block_height: BlockHeight) -> Vec<SignedVext> {
+        #[cfg(not(feature = "abcipp"))]
+        {
+            #[allow(clippy::drop_copy)]
+            drop(block_height);
+        }
+
+        let VextDigest {
+            signatures,
+            voting_powers,
+        } = self;
+
+        let mut extensions = vec![];
+
+        for (validator_addr, signature) in signatures.into_iter() {
+            let (validator_addr, _block_height) = validator_addr;
+            let voting_powers = voting_powers.clone();
+            let data = Vext {
+                validator_addr,
+                voting_powers,
+                block_height,
+            };
+            extensions.push(SignedVext::new_from(data, signature));
+        }
+        extensions
+    }
+
+    /// Returns an Ethereum ABI encoded string with the
+    /// params to feed to the Ethereum bridge smart contracts.
+    pub fn abi_params(&self) -> String {
+        todo!()
+    }
+}
+
+/// Represents a [`Vext`] signed by some validator, with
+/// an Ethereum key.
+pub type SignedVext = Signed<Vext, SerializeWithAbiEncode>;
+
+/// Type alias for a [`ValidatorSetUpdateVext`].
+pub type Vext = ValidatorSetUpdateVext;
+
+/// Represents a validator set update, for some new [`Epoch`].
+#[derive(
+    Eq, PartialEq, Clone, Debug, BorshSerialize, BorshDeserialize, BorshSchema,
+)]
+pub struct ValidatorSetUpdateVext {
+    /// The addresses of the validators in the new [`Epoch`],
+    /// and their respective voting power.
+    ///
+    /// When signing a [`Vext`], this [`VotingPowersMap`] is converted
+    /// into two arrays: one for its keys, and another for its
+    /// values. The arrays are sorted in descending order based
+    /// on the voting power of each validator.
+    pub voting_powers: VotingPowersMap,
+    /// TODO: the validator's address is temporarily being included
+    /// until we're able to map a Tendermint address to a validator
+    /// address (see )
+    pub validator_addr: Address,
+    /// The value of the Namada [`BlockHeight`] at the creation of this
+    /// [`Vext`].
+    ///
+    /// An important invariant is that this [`BlockHeight`] will always
+    /// correspond to an epoch before the new validator set is installed.
+    ///
+    /// Since this is a monotonically growing sequence number,
+    /// it is signed together with the rest of the data to
+    /// prevent replay attacks on validator set updates.
+    ///
+    /// Additionally, we can use this [`BlockHeight`] value to query the
+    /// epoch with the appropriate validator set to verify signatures with
+    /// (i.e. the previous validator set).
+    pub block_height: BlockHeight,
+}
+
+impl Vext {
+    /// Creates a new signed [`Vext`].
+    ///
+    /// For more information, read the docs of [`SignedVext::new`].
+    #[inline]
+    pub fn sign(&self, sk: &common::SecretKey) -> SignedVext {
+        SignedVext::new(sk, self.clone())
+    }
+}
+
+/// Container type for both kinds of Ethereum bridge addresses:
+///
+/// - An address derived from a hot key.
+/// - An address derived from a cold key.
+#[derive(
+    Clone,
+    Debug,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Hash,
+    BorshSerialize,
+    BorshDeserialize,
+    BorshSchema,
+)]
+pub struct EthAddrBook {
+    /// Ethereum address derived from a hot key.
+    pub hot_key_addr: EthAddress,
+    /// Ethereum address derived from a cold key.
+    pub cold_key_addr: EthAddress,
+}
+
+/// Provides a mapping between [`EthAddress`] and [`token::Amount`] instances.
+pub type VotingPowersMap = HashMap<EthAddrBook, token::Amount>;
+
+/// This trait contains additional methods for a [`VotingPowersMap`], related
+/// with validator set update vote extensions logic.
+pub trait VotingPowersMapExt {
+    /// Returns the list of Ethereum validator hot and cold addresses and their
+    /// respective voting powers (in this order), with an Ethereum ABI
+    /// compatible encoding.
+    /// Implementations of this method must be deterministic based on `self`.
+    /// In addition, the returned `Vec`s must be sorted in descending order by
+    /// voting power, as this is more efficient to deal with on the Ethereum
+    /// side when working out if there is enough voting power for a given
+    /// validator set update.
+    fn get_abi_encoded(&self) -> (Vec<Token>, Vec<Token>, Vec<Token>);
+
+    /// Returns the keccak hashes of this [`VotingPowersMap`],
+    /// to be signed by an Ethereum hot and cold key, respectively.
+    fn get_bridge_and_gov_hashes(
+        &self,
+        block_height: BlockHeight,
+    ) -> (KeccakHash, KeccakHash) {
+        let (hot_key_addrs, cold_key_addrs, voting_powers) =
+            self.get_abi_encoded();
+
+        let bridge_hash = compute_hash(
+            block_height,
+            BRIDGE_CONTRACT_NAMESPACE,
+            hot_key_addrs,
+            voting_powers.clone(),
+        );
+
+        let governance_hash = compute_hash(
+            block_height,
+            GOVERNANCE_CONTRACT_NAMESPACE,
+            cold_key_addrs,
+            voting_powers,
+        );
+
+        (bridge_hash, governance_hash)
+    }
+}
+
+/// Compare two items of [`VotingPowersMap`]. This comparison operation must
+/// match the equivalent comparison operation in Ethereum bridge code.
+fn compare_voting_powers_map_items(
+    first: &(&EthAddrBook, &token::Amount),
+    second: &(&EthAddrBook, &token::Amount),
+) -> Ordering {
+    let (first_power, second_power) = (first.1, second.1);
+    let (first_addr, second_addr) = (first.0, second.0);
+    match second_power.cmp(first_power) {
+        Ordering::Equal => first_addr.cmp(second_addr),
+        ordering => ordering,
+    }
+}
+
+impl VotingPowersMapExt for VotingPowersMap {
+    fn get_abi_encoded(&self) -> (Vec<Token>, Vec<Token>, Vec<Token>) {
+        // get addresses and voting powers all into one vec so that they can be
+        // sorted appropriately
+        let mut unsorted: Vec<_> = self.iter().collect();
+        unsorted.sort_by(compare_voting_powers_map_items);
+        let sorted = unsorted;
+
+        let total_voting_power: u64 = sorted
+            .iter()
+            .map(|&(_, &voting_power)| u64::from(voting_power))
+            .sum();
+
+        // split the vec into three portions
+        sorted.into_iter().fold(
+            Default::default(),
+            |accum, (addr_book, &voting_power)| {
+                let voting_power: u64 = voting_power.into();
+
+                // normalize the voting power
+                // https://github.com/anoma/ethereum-bridge/blob/fe93d2e95ddb193a759811a79c8464ad4d709c12/test/utils/utilities.js#L29
+                const NORMALIZED_VOTING_POWER: u64 = 1 << 32;
+
+                let voting_power = Ratio::new(voting_power, total_voting_power)
+                    * NORMALIZED_VOTING_POWER;
+                let voting_power = voting_power.round().to_integer();
+                let voting_power: ethereum::U256 = voting_power.into();
+
+                let (mut hot_key_addrs, mut cold_key_addrs, mut voting_powers) =
+                    accum;
+                let &EthAddrBook {
+                    hot_key_addr: EthAddress(hot_key_addr),
+                    cold_key_addr: EthAddress(cold_key_addr),
+                } = addr_book;
+
+                hot_key_addrs
+                    .push(Token::Address(ethereum::H160(hot_key_addr)));
+                cold_key_addrs
+                    .push(Token::Address(ethereum::H160(cold_key_addr)));
+                voting_powers.push(Token::Uint(voting_power));
+
+                (hot_key_addrs, cold_key_addrs, voting_powers)
+            },
+        )
+    }
+}
+
+/// Convert a [`BlockHeight`] to a [`Token`].
+#[inline]
+fn bheight_to_token(BlockHeight(h): BlockHeight) -> Token {
+    Token::Uint(h.into())
+}
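A worked example of the normalization in `get_abi_encoded` above: powers are rescaled to a 2^32 denominator before ABI encoding, so a validator holding 200 of 300 total stake encodes as round((200/300) * 2^32) = 2863311531. The same computation in isolation (a sketch, not part of the changeset):

```rust
use num_rational::Ratio;

// normalize `power` out of `total` to a 2^32 scale, as the fold above does
fn normalized_power(power: u64, total: u64) -> u64 {
    const NORMALIZED_VOTING_POWER: u64 = 1 << 32;
    (Ratio::new(power, total) * NORMALIZED_VOTING_POWER)
        .round()
        .to_integer()
}
```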
+/// Compute the keccak hash of a validator set update.
+///
+/// For more information, check the Ethereum bridge smart contracts:
+// -
+// -
+#[inline]
+fn compute_hash(
+    block_height: BlockHeight,
+    namespace: &str,
+    validators: Vec<Token>,
+    voting_powers: Vec<Token>,
+) -> KeccakHash {
+    AbiEncode::keccak256(&[
+        Token::String(namespace.into()),
+        Token::Array(validators),
+        Token::Array(voting_powers),
+        bheight_to_token(block_height),
+    ])
+}
+
+/// Struct for serializing validator set
+/// arguments with ABI for Ethereum smart
+/// contracts.
+#[derive(Debug, Clone, Default)]
+pub struct ValidatorSetArgs {
+    /// Ethereum address of validators
+    pub validators: Vec<EthAddress>,
+    /// Voting powers of validators
+    pub powers: Vec<Uint>,
+    /// A nonce
+    pub nonce: Uint,
+}
+
+impl Encode<1> for ValidatorSetArgs {
+    fn tokenize(&self) -> [Token; 1] {
+        let addrs = Token::Array(
+            self.validators
+                .iter()
+                .map(|addr| Token::Address(addr.0.into()))
+                .collect(),
+        );
+        let powers = Token::Array(
+            self.powers
+                .iter()
+                .map(|power| Token::Uint(power.clone().into()))
+                .collect(),
+        );
+        let nonce = Token::Uint(self.nonce.clone().into());
+        [Token::Tuple(vec![addrs, powers, nonce])]
+    }
+}
+
+// this is only here so we don't pollute the
+// outer namespace with serde traits
+mod tag {
+    use serde::{Deserialize, Serialize};
+
+    use super::{bheight_to_token, Vext, VotingPowersMapExt};
+    use crate::proto::SignedSerialize;
+    use crate::types::eth_abi::{AbiEncode, Encode, Token};
+    use crate::types::keccak::KeccakHash;
+
+    /// Tag type that indicates we should use [`AbiEncode`]
+    /// to sign data in a [`crate::proto::Signed`] wrapper.
+    #[derive(Eq, PartialEq, Clone, Debug, Serialize, Deserialize)]
+    pub struct SerializeWithAbiEncode;
+
+    impl SignedSerialize<Vext> for SerializeWithAbiEncode {
+        type Output = [u8; 32];
+
+        fn serialize(ext: &Vext) -> Self::Output {
+            let (KeccakHash(bridge_hash), KeccakHash(gov_hash)) = ext
+                .voting_powers
+                .get_bridge_and_gov_hashes(ext.block_height);
+            let KeccakHash(output) = AbiEncode::signed_keccak256(&[
+                Token::String("updateValidatorsSet".into()),
+                Token::FixedBytes(bridge_hash.to_vec()),
+                Token::FixedBytes(gov_hash.to_vec()),
+                bheight_to_token(ext.block_height),
+            ]);
+            output
+        }
+    }
+}
+
+#[doc(inline)]
+pub use tag::SerializeWithAbiEncode;
+
+#[cfg(test)]
+mod tests {
+    use data_encoding::HEXLOWER;
+
+    use super::*;
+
+    /// Test the keccak hash of a validator set update
+    #[test]
+    fn test_validator_set_update_keccak_hash() {
+        // ```js
+        // const ethers = require('ethers');
+        // const keccak256 = require('keccak256')
+        //
+        // const abiEncoder = new ethers.utils.AbiCoder();
+        //
+        // const output = abiEncoder.encode(
+        //     ['string', 'address[]', 'uint256[]', 'uint256'],
+        //     ['bridge', [], [], 1],
+        // );
+        //
+        // const hash = keccak256(output).toString('hex');
+        //
+        // console.log(hash);
+        // ```
+        const EXPECTED: &str =
+            "694d9bc27d5da7444e5742b13394b2c8a7e73b43d6acd52b6e23b26b612f7c86";
+
+        let KeccakHash(got) = compute_hash(
+            1u64.into(),
+            BRIDGE_CONTRACT_NAMESPACE,
+            vec![],
+            vec![],
+        );
+
+        assert_eq!(&HEXLOWER.encode(&got[..]), EXPECTED);
+    }
+
+    /// Checks that comparing two [`VotingPowersMap`] items which have the same
+    /// voting powers but different [`EthAddrBook`]s does not result in them
+    /// being regarded as equal.
+    #[test]
+    fn test_compare_voting_powers_map_items_identical_voting_powers() {
+        let same_voting_power = 200.into();
+
+        let validator_a = EthAddrBook {
+            hot_key_addr: EthAddress([0; 20]),
+            cold_key_addr: EthAddress([0; 20]),
+        };
+        let validator_b = EthAddrBook {
+            hot_key_addr: EthAddress([1; 20]),
+            cold_key_addr: EthAddress([1; 20]),
+        };
+
+        assert_eq!(
+            compare_voting_powers_map_items(
+                &(&validator_a, &same_voting_power),
+                &(&validator_b, &same_voting_power),
+            ),
+            Ordering::Less
+        );
+    }
+
+    /// Checks that comparing two [`VotingPowersMap`] items with different
+    /// voting powers results in the item with the lesser voting power being
+    /// regarded as "greater".
+    #[test]
+    fn test_compare_voting_powers_map_items_different_voting_powers() {
+        let validator_a = EthAddrBook {
+            hot_key_addr: EthAddress([0; 20]),
+            cold_key_addr: EthAddress([0; 20]),
+        };
+        let validator_a_voting_power = 200.into();
+        let validator_b = EthAddrBook {
+            hot_key_addr: EthAddress([1; 20]),
+            cold_key_addr: EthAddress([1; 20]),
+        };
+        let validator_b_voting_power = 100.into();
+
+        assert_eq!(
+            compare_voting_powers_map_items(
+                &(&validator_a, &validator_a_voting_power),
+                &(&validator_b, &validator_b_voting_power),
+            ),
+            Ordering::Less
+        );
+    }
+
+    /// Checks that [`VotingPowersMapExt::get_abi_encoded`] gives a
+    /// deterministic result in the case where there are multiple validators
+    /// with the same voting power.
+    ///
+    /// NB: this test may pass even if the implementation is not
+    /// deterministic, unless the test is run with the `--release` profile, as
+    /// it is implicitly relying on how iterating over a [`HashMap`] seems to
+    /// return items in the order in which they were inserted, at least for
+    /// this very small 2-item example.
+    #[test]
+    fn test_voting_powers_map_get_abi_encoded_deterministic_with_identical_voting_powers()
+     {
+        let validator_a = EthAddrBook {
+            hot_key_addr: EthAddress([0; 20]),
+            cold_key_addr: EthAddress([0; 20]),
+        };
+        let validator_b = EthAddrBook {
+            hot_key_addr: EthAddress([1; 20]),
+            cold_key_addr: EthAddress([1; 20]),
+        };
+        let same_voting_power = 200.into();
+
+        let mut voting_powers_1 = VotingPowersMap::default();
+        voting_powers_1.insert(validator_a.clone(), same_voting_power);
+        voting_powers_1.insert(validator_b.clone(), same_voting_power);
+
+        let mut voting_powers_2 = VotingPowersMap::default();
+        voting_powers_2.insert(validator_b, same_voting_power);
+        voting_powers_2.insert(validator_a, same_voting_power);
+
+        let x = voting_powers_1.get_abi_encoded();
+        let y = voting_powers_2.get_abi_encoded();
+        assert_eq!(x, y);
+    }
+}
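The intended use of the type introduced below, sketched for illustration (not part of the changeset; crate path assumed): accumulate each signer's fraction of the total stake and compare it against the two-thirds threshold.

```rust
use namada_core::types::voting_power::FractionalVotingPower;

fn has_quorum(fractions: &[FractionalVotingPower]) -> bool {
    // `Sum` and `Ord` are implemented below, so this is a one-liner
    let total: FractionalVotingPower = fractions.iter().cloned().sum();
    total > FractionalVotingPower::TWO_THIRDS
}
```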
+    /// It must be between zero and one inclusive.
+    pub fn new(numer: u64, denom: u64) -> Result<Self> {
+        if denom == 0 {
+            return Err(eyre!("denominator can't be zero"));
+        }
+        let ratio: Ratio<u64> = (numer, denom).into();
+        if ratio > 1.into() {
+            return Err(eyre!(
+                "fractional voting power cannot be greater than one"
+            ));
+        }
+        Ok(Self(ratio))
+    }
+}
+
+impl Default for FractionalVotingPower {
+    fn default() -> Self {
+        Self::new(0, 1).unwrap()
+    }
+}
+
+impl From<&FractionalVotingPower> for (u64, u64) {
+    fn from(ratio: &FractionalVotingPower) -> Self {
+        (ratio.0.numer().to_owned(), ratio.0.denom().to_owned())
+    }
+}
+
+impl Sum for FractionalVotingPower {
+    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
+        iter.fold(Self::default(), Add::add)
+    }
+}
+
+impl Add for FractionalVotingPower {
+    type Output = Self;
+
+    fn add(self, rhs: FractionalVotingPower) -> Self::Output {
+        Self(self.0 + rhs.0)
+    }
+}
+
+impl Add<&FractionalVotingPower> for FractionalVotingPower {
+    type Output = Self;
+
+    fn add(self, rhs: &FractionalVotingPower) -> Self::Output {
+        Self(self.0 + rhs.0)
+    }
+}
+
+impl AddAssign for FractionalVotingPower {
+    fn add_assign(&mut self, rhs: FractionalVotingPower) {
+        *self = Self(self.0 + rhs.0)
+    }
+}
+
+impl AddAssign<&FractionalVotingPower> for FractionalVotingPower {
+    fn add_assign(&mut self, rhs: &FractionalVotingPower) {
+        *self = Self(self.0 + rhs.0)
+    }
+}
+
+impl BorshSerialize for FractionalVotingPower {
+    fn serialize<W: std::io::Write>(
+        &self,
+        writer: &mut W,
+    ) -> std::io::Result<()> {
+        let (numer, denom): (u64, u64) = self.into();
+        (numer, denom).serialize(writer)
+    }
+}
+
+impl BorshDeserialize for FractionalVotingPower {
+    fn deserialize(buf: &mut &[u8]) -> std::io::Result<Self> {
+        let (numer, denom): (u64, u64) = BorshDeserialize::deserialize(buf)?;
+        Ok(FractionalVotingPower(Ratio::<u64>::new(numer, denom)))
+    }
+}
+
+impl BorshSchema for FractionalVotingPower {
+    fn add_definitions_recursively(
+        definitions: &mut std::collections::HashMap<
+            borsh::schema::Declaration,
+            borsh::schema::Definition,
+        >,
+    ) {
+        let fields =
+            borsh::schema::Fields::UnnamedFields(borsh::maybestd::vec![
+                u64::declaration(),
+                u64::declaration()
+            ]);
+        let definition = borsh::schema::Definition::Struct { fields };
+        Self::add_definition(Self::declaration(), definition, definitions);
+    }
+
+    fn declaration() -> borsh::schema::Declaration {
+        "FractionalVotingPower".into()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// This test is ultimately just exercising the underlying
+    /// library we use for fractions; we want to make sure
+    /// operators work as expected with our FractionalVotingPower
+    /// type itself
+    #[test]
+    fn test_fractional_voting_power_ord_eq() {
+        assert!(
+            FractionalVotingPower::TWO_THIRDS
+                > FractionalVotingPower::new(1, 4).unwrap()
+        );
+        assert!(
+            FractionalVotingPower::new(1, 3).unwrap()
+                > FractionalVotingPower::new(1, 4).unwrap()
+        );
+        assert!(
+            FractionalVotingPower::new(1, 3).unwrap()
+                == FractionalVotingPower::new(2, 6).unwrap()
+        );
+    }
+
+    /// Test error handling on the FractionalVotingPower type
+    #[test]
+    fn test_fractional_voting_power_valid_fractions() {
+        assert!(FractionalVotingPower::new(0, 0).is_err());
+        assert!(FractionalVotingPower::new(1, 0).is_err());
+        assert!(FractionalVotingPower::new(0, 1).is_ok());
+        assert!(FractionalVotingPower::new(1, 1).is_ok());
+        assert!(FractionalVotingPower::new(1, 2).is_ok());
+        assert!(FractionalVotingPower::new(3, 2).is_err());
+    }
+}
diff --git a/documentation/specs/src/SUMMARY.md b/documentation/specs/src/SUMMARY.md
index 94c6390400..267409fe92 100644
--- a/documentation/specs/src/SUMMARY.md
+++ b/documentation/specs/src/SUMMARY.md
@@ -9,6 +9,7 @@
   - [Multisignature account](./base-ledger/multisignature.md)
   - [Fungible token](./base-ledger/fungible-token.md)
   - [Replay protection](./base-ledger/replay-protection.md)
+  - [Block space allocator](./base-ledger/block-space-allocator.md)
 - [Multi-asset shielded pool](./masp.md)
   - [Ledger integration](./masp/ledger-integration.md)
   - [Asset type](./masp/asset-type.md)
@@ -17,6 +18,13 @@
   - [Trusted setup](./masp/trusted-setup.md)
 - [Interoperability](./interoperability.md)
   - [Ethereum bridge](./interoperability/ethereum-bridge.md)
+    - [Security](./interoperability/ethereum-bridge/security.md)
+    - [Bootstrapping](./interoperability/ethereum-bridge/bootstrapping.md)
+    - [Ethereum events attestation](./interoperability/ethereum-bridge/ethereum_events_attestation.md)
+    - [Transfers to Namada](./interoperability/ethereum-bridge/transfers_to_namada.md)
+    - [Transfers to Ethereum](./interoperability/ethereum-bridge/transfers_to_ethereum.md)
+    - [Proofs](./interoperability/ethereum-bridge/proofs.md)
+    - [Ethereum smart contracts](./interoperability/ethereum-bridge/ethereum_smart_contracts.md)
   - [IBC](./interoperability/ibc.md)
 - [Economics](./economics.md)
   - [Fee system](./economics/fee-system.md)
diff --git a/documentation/specs/src/base-ledger/block-space-allocator.md b/documentation/specs/src/base-ledger/block-space-allocator.md
new file mode 100644
index 0000000000..aa05dcbaea
--- /dev/null
+++ b/documentation/specs/src/base-ledger/block-space-allocator.md
@@ -0,0 +1,206 @@
+# Block space allocator
+
+Block space in Tendermint is a resource whose management is relinquished to the
+running application. This section covers the design of an abstraction that
+facilitates the process of transparently allocating space for transactions in a
+block at some height $H$, whilst upholding the safety and liveness properties
+of Namada.
+
+## On block sizes in Tendermint and Namada
+
+[Block sizes in Tendermint]
+(configured through the $MaxBytes$ consensus
+parameter) have a minimum value of $1\ \text{byte}$, and a hard cap of $100\
+MiB$, reflecting the header, evidence of misbehavior (used to slash
+Byzantine validators) and transaction data, as well as any potential protobuf
+serialization overhead. Some of these data are dynamic in nature (e.g.
+evidence of misbehavior), so the total size reserved to transactions in a block
+at some height $H_0$ might not be the same as another block's, say, at some
+height $H_1 : H_1 \ne H_0$. During Tendermint's `PrepareProposal` ABCI phase,
+applications receive a $MaxTxBytes$ parameter whose value already accounts for
+the total space available for transactions at some height $H$. Namada does not
+rely on the $MaxTxBytes$ parameter of `RequestPrepareProposal`; instead,
+app-side validators configure a $MaxProposalBytes$ parameter at genesis (or
+through governance) and set Tendermint blocks' $MaxBytes$ parameter to its
+upper bound.
+
+[Block sizes in Tendermint]:
+
+## Transaction batch construction
+
+During Tendermint's `PrepareProposal` ABCI phase, Namada (the ABCI server) is
+fed a set of transactions $M = \{\ tx\ |\ tx\text{ in Tendermint's mempool}\
+\}$, whose total combined size (i.e. the sum of the bytes occupied by each $tx
+: tx \in M$) may be greater than $MaxProposalBytes$. Therefore, consensus round
+leaders are responsible for selecting a batch of transactions $P$ whose total
+combined bytes $P_{Len} \le MaxProposalBytes$.
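As a rough illustration of this size constraint only (this is not the allocator introduced below; the function and its types are hypothetical), a round leader could greedily skim serialized transactions from the mempool while tracking the bytes already used:

```rust
/// Hypothetical sketch: pick a batch P from mempool txs such that the
/// combined size of the selected txs never exceeds `max_proposal_bytes`.
fn select_proposal_batch(
    mempool: impl IntoIterator<Item = Vec<u8>>,
    max_proposal_bytes: usize,
) -> Vec<Vec<u8>> {
    let mut batch = Vec::new();
    let mut used_bytes = 0;
    for tx in mempool {
        // Skip any tx that would push the batch past the byte cap.
        if used_bytes + tx.len() <= max_proposal_bytes {
            used_bytes += tx.len();
            batch.push(tx);
        }
    }
    batch
}
```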
+
+To stay within these bounds, block space is **allotted** to different kinds of
+transactions: decrypted, protocol and encrypted transactions. Each kind of
+transaction gets about $\frac{1}{3} MaxProposalBytes$ worth of allotted space,
+in an abstract container dubbed the `TxBin`. A transaction $tx : tx \in M$ may
+be **dumped** to a `TxBin`, resulting in a successful operation, or an error,
+if $tx$ is **rejected** due to lack of space in the `TxBin` or if $tx$'s size
+**overflows** (i.e. does not fit in) the `TxBin`. Block proposers continue
+dumping transactions from $M$ into a `TxBin` $B$ until a rejection error is
+encountered, or until there are no more transactions of the same type as $B$'s
+in $M$. The `BlockSpaceAllocator` contains three `TxBin` instances, responsible
+for holding decrypted, protocol and encrypted transactions.
+
+![block space allocator tx bins](images/block-space-allocator-bins.svg)
+
+During occasional Namada protocol events, such as DKG parameter negotiation,
+all available block space should be reserved to protocol transactions,
+therefore the `BlockSpaceAllocator` was designed as a state machine, whose
+state transitions depend on the state of Namada. The states of the
+`BlockSpaceAllocator` are the following:
+
+1. `BuildingDecryptedTxBatch` - As the name implies, during this state the
+decrypted transactions `TxBin` is filled with transactions of the same type.
+Honest block proposers will only include decrypted transactions in a block at a
+fixed height $H_0$ if encrypted transactions were available at $H_0 - 1$. The
+decrypted transactions should be included in the same order of the encrypted
+transactions of block $H_0 - 1$. Likewise, all decrypted transactions available
+at $H_0$ must be included.
+2. `BuildingProtocolTxBatch` - In a similar manner, during this
+`BlockSpaceAllocator` state, the protocol transactions `TxBin` is populated
+with transactions of the same type. Contrary to the first state, allocation
+stops as soon as the respective `TxBin` runs out of space for some
+$tx_{Protocol} : tx_{Protocol} \in M$. The `TxBin` for protocol transactions is
+allotted half of the remaining block space, after decrypted transactions have
+been **allocated**.
+3. `BuildingEncryptedTxBatch` - This state behaves a lot like the previous
+state, with one addition: it takes a parameter that guards the encrypted
+transactions `TxBin`, which in effect splits the state into two sub-states.
+When `WithEncryptedTxs` is active, we fill block space with encrypted
+transactions (as the name implies); orthogonal to this mode of operation, there
+is `WithoutEncryptedTxs`, which, as the name implies, does not allow encrypted
+transactions to be included in a block. The `TxBin` for encrypted transactions
+is allotted $\min(R,\frac{1}{3} MaxProposalBytes)$ bytes, where $R$ is the
+block space remaining after allocating space for decrypted and protocol
+transactions.
+4. `FillingRemainingSpace` - The final state of the `BlockSpaceAllocator`. Due
+to the short-circuit behavior of a `TxBin`, on allocation errors, some space
+may be left unutilized at the end of the third state. In this state, the only
+kinds of transactions that are left to fill the available block space are
+of type encrypted and protocol, but encrypted transactions are forbidden
+to be included, to avoid breaking their invariant regarding
+allotted block space (i.e. encrypted transactions can only occupy up to
+$\frac{1}{3}$ of the total block space for a given height $H$). As such,
+only protocol transactions are allowed at the fourth and final state of
+the `BlockSpaceAllocator`.
+
+For a fixed block height $H_0$, if at $H_0 - 1$ and $H_0$ no encrypted
+transactions are included in the respective proposals, the block decided for
+height $H_0$ will only contain protocol transactions. Similarly, since at most
+$\frac{1}{3}$ of the available block space at a fixed height $H_1$ is reserved
+to encrypted transactions, and decrypted transactions at $H_1+1$ will take up
+(at most) the same amount of space as encrypted transactions at height $H_1$,
+each transaction kind's `TxBin` will generally get allotted about $\frac{1}{3}$
+of the available block space.
+
+### Example
+
+Consider the following diagram:
+
+![block space allocator example](images/block-space-allocator-example.svg)
+
+We denote `D`, `P` and `E` as decrypted, protocol and encrypted transactions,
+respectively.
+
+* At height $H$, block space is evenly divided in three parts, one for each
+kind of transaction type.
+* At height $H+1$, we do not include encrypted transactions in the proposal,
+therefore protocol transactions are allowed to take up to $\frac{2}{3}$ of the
+available block space.
+* At height $H+2$, no encrypted transactions are included either. Notice that
+no decrypted transactions were included in the proposal, since at height $H+1$
+we did not decide on any encrypted transactions. In sum, only protocol
+transactions are included in the proposal for the block with height $H+2$.
+* At height $H+3$, we propose encrypted transactions once more. Just like in
+the previous scenario, no decrypted transactions are available. Encrypted
+transactions are capped at $\frac{1}{3}$ of the available block space, so the
+remaining $\frac{1}{2} - \frac{1}{3} = \frac{1}{6}$ of the available block
+space is filled with protocol transactions.
+* At height $H+4$, allocation returns to its normal operation, thus block space
+is divided in three equal parts for each kind of transaction type.
+
+## Transaction batch validation
+
+Batches of transactions proposed during ABCI's `PrepareProposal` phase are
+validated at the `ProcessProposal` phase. The validation conditions are
+relaxed, compared to the rigid block structure imposed on blocks during
+`PrepareProposal` (i.e. with decrypted, protocol and encrypted transactions
+appearing in this order, as [exemplified above](#example)). Let us fix $H$ as
+the height of the block $B$ currently being decided through Tendermint's
+consensus mechanism, $P$ as the batch of transactions proposed at $H$ as $B$'s
+payload and $V$ as the current set of active validators. To vote on $P$, each
+validator $v \in V$ checks (a sketch of the first two checks follows this
+list):
+
+* If the length of $P$ in bytes, defined as $P_{Len} := \sum_{tx \in
+P} \text{size\_of}(tx)$, is not greater than $MaxProposalBytes$.
+* If $P$ does not contain more than $\frac{1}{3} MaxProposalBytes$ worth of
+encrypted transactions.
+  - While not directly checked, our batch construction invariants guarantee
+that we will constrain decrypted transactions to occupy up to $\frac{1}{3}
+MaxProposalBytes$ bytes of the available block space at $H$ (or any block
+height, in fact).
+* If all decrypted transactions from $H-1$ have been included in the proposal
+$P$, for height $H$.
+* That no encrypted transactions were included in the proposal $P$, if no
+encrypted transactions should be included at $H$.
+  - N.b. the conditions to reject encrypted transactions are still not clearly
+    specced out, therefore they will be left out of this section, for the
+    time being.
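A minimal sketch of the first two checks, under the simplifying assumption that each proposed transaction carries its serialized bytes and an encrypted-kind flag (both hypothetical here):

```rust
/// Hypothetical stand-in for a proposed transaction.
struct ProposedTx {
    bytes: Vec<u8>,
    is_encrypted: bool,
}

/// Returns true iff P fits within MaxProposalBytes and encrypted txs stay
/// within their one-third allotment.
fn check_proposal_size(txs: &[ProposedTx], max_proposal_bytes: usize) -> bool {
    let total: usize = txs.iter().map(|tx| tx.bytes.len()).sum();
    let encrypted: usize = txs
        .iter()
        .filter(|tx| tx.is_encrypted)
        .map(|tx| tx.bytes.len())
        .sum();
    total <= max_proposal_bytes && encrypted <= max_proposal_bytes / 3
}
```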
+
+Should any of these conditions not be met at some arbitrary round $R$ of $H$,
+all honest validators $V_h : V_h \subseteq V$ will reject the proposal $P$.
+Byzantine validators may re-order the layout of $P$ that would typically be
+derived from the [`BlockSpaceAllocator`](#transaction-batch-construction) $A$
+under normal operation; however, this should not compromise the safety and
+liveness properties of Namada. The rigid layout of $B$ is simply a
+consequence of $A$ allocating in different phases.
+
+### On validator set updates
+
+Validator set updates, one type of protocol transactions decided through BFT
+consensus in Namada, are fundamental to the liveness properties of the Ethereum
+bridge, thus, ideally we would also check if these would be included once per
+epoch at the `ProcessProposal` stage. Unfortunately, achieving a quorum of
+signatures for a validator set update between two adjacent block heights
+through ABCI alone is not feasible. Hence, the Ethereum bridge is not a live
+distributed system, since there is the possibility to cross an epoch boundary
+without constructing a valid proof for some validator set update. In practice,
+however, it is nearly impossible for the bridge to get "stuck", as validator
+set updates are eagerly issued at the start of an epoch, whose length should be
+long enough for consensus(*) to be reached on a single validator set update.
+
+(*) Note that we loosely used consensus here to refer to the process of
+acquiring a quorum (e.g. more than $\frac{2}{3}$ of voting power, by stake) of
+signatures on a single validator set update. "Chunks" of a proof (i.e.
+individual votes) are decided and batched together, until a complete proof is
+constructed.
+
+We cover validator set updates in detail in [the Ethereum bridge section].
+
+[the Ethereum bridge section]: ../interoperability/ethereum-bridge.md
+
+## Governance
+
+Governance parameter update proposals for $MaxProposalBytes_H$ that take effect
+at $H$, where $H$ is some arbitrary block height, should be such that
+$MaxProposalBytes_H \ge \frac{1}{3} MaxProposalBytes_{H-1}$, to leave enough
+room for all decrypted transactions from $H-1$ at $H$. Subsequent block heights
+$H' : H' > H$ should eventually lead to allotted block space converging to about
+$\frac{1}{3} MaxProposalBytes_H$ for each kind of transaction type.
diff --git a/documentation/specs/src/base-ledger/images/block-space-allocator-bins.svg b/documentation/specs/src/base-ledger/images/block-space-allocator-bins.svg
new file mode 100644
index 0000000000..f9d7209da1
--- /dev/null
+++ b/documentation/specs/src/base-ledger/images/block-space-allocator-bins.svg
@@ -0,0 +1,4 @@
[SVG diagram: the BlockSpaceAllocator dumps transactions from the mempool set M via bin.try_dump(tx) into its DECRYPTED, PROTOCOL and ENCRYPTED TxBins, yielding the set P of proposed transactions]
diff --git a/documentation/specs/src/base-ledger/images/block-space-allocator-example.svg b/documentation/specs/src/base-ledger/images/block-space-allocator-example.svg
new file mode 100644
index 0000000000..b19ad90ce7
--- /dev/null
+++ b/documentation/specs/src/base-ledger/images/block-space-allocator-example.svg
@@ -0,0 +1,4 @@
[SVG diagram: example of block space at heights H through H+4, showing how much of each block is taken by decrypted (D), protocol (P) and encrypted (E) transactions]
\ No newline at end of file diff --git a/documentation/specs/src/interoperability/ethereum-bridge.md b/documentation/specs/src/interoperability/ethereum-bridge.md index 6d5370ea4e..ed2dc58c00 100644 --- a/documentation/specs/src/interoperability/ethereum-bridge.md +++ b/documentation/specs/src/interoperability/ethereum-bridge.md @@ -2,37 +2,35 @@ The Namada - Ethereum bridge exists to mint ERC20 tokens on Namada which naturally can be redeemed on Ethereum at a later time. Furthermore, it -allows the minting of wrapped tokens on Ethereum backed by escrowed assets on -Namada. +allows the minting of wrapped NAM (wNAM) tokens on Ethereum. The Namada Ethereum bridge system consists of: + * An Ethereum full node run by each Namada validator, for including relevant Ethereum events into Namada. * A set of validity predicates on Namada which roughly implements [ICS20](https://docs.cosmos.network/v0.42/modules/ibc/) fungible token transfers. * A set of Ethereum smart contracts. -* A relayer for submitting transactions to Ethereum +* An automated process to send validator set updates to the Ethereum smart + contracts. +* A relayer binary to aid in submitting transactions to Ethereum This basic bridge architecture should provide for almost-Namada consensus security for the bridge and free Ethereum state reads on Namada, plus bidirectional message passing with reasonably low gas costs on the Ethereum side. -## Security -On Namada, the validators are full nodes of Ethereum and their stake is also -accounting for security of the bridge. If they carry out a forking attack -on Namada to steal locked tokens of Ethereum their stake will be slashed on Namada. -On the Ethereum side, we will add a limit to the amount of assets that can be -locked to limit the damage a forking attack on Namada can do. To make an attack -more cumbersome we will also add a limit on how fast wrapped Ethereum assets can -be redeemed from Namada. This will not add more security, but rather make the -attack more inconvenient. - -## Ethereum Events Attestation -We want to store events from the smart contracts of our bridge onto Namada. We -will include events that have been seen by at least one validator, but will not -act on them until they have been seen by at least 2/3 of voting power. +## Topics + - [Bootstrapping](./ethereum-bridge/bootstrapping.md) + - [Security](./ethereum-bridge/security.md) + - [Ethereum Events Attestation](./ethereum-bridge/ethereum_events_attestation.md) + - [Transfers from Ethereum to Namada](./ethereum-bridge/transfers_to_namada.md) + - [Transfers from Namada to Ethereum](./ethereum-bridge/transfers_to_ethereum.md) + - [Proofs and validator set updates](./ethereum-bridge/proofs.md) + - [Smart Contracts](./ethereum-bridge/ethereum_smart_contracts.md) + +## Resources which may be helpful There will be multiple types of events emitted. Validators should ignore improperly formatted events. Raw events from Ethereum are converted to a @@ -392,4 +390,3 @@ Namada. 
 - [ICS20](https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer)
 - [Rainbow Bridge contracts](https://github.com/aurora-is-near/rainbow-bridge/tree/master/contracts)
 - [IBC in Solidity](https://github.com/hyperledger-labs/yui-ibc-solidity)
-
diff --git a/documentation/specs/src/interoperability/ethereum-bridge/bootstrapping.md b/documentation/specs/src/interoperability/ethereum-bridge/bootstrapping.md
new file mode 100644
index 0000000000..876f83ec09
--- /dev/null
+++ b/documentation/specs/src/interoperability/ethereum-bridge/bootstrapping.md
@@ -0,0 +1,117 @@
+# Bootstrapping the bridge
+
+## Overview
+
+The Ethereum bridge is not enabled at the launch of a Namada chain. Instead,
+there are two governance parameters:
+
+- `eth_bridge_proxy_address`
+- `eth_bridge_wnam_address`
+
+Both are initialized to the zero Ethereum address
+(`"0x0000000000000000000000000000000000000000"`). An overview of the steps to
+enable the Ethereum bridge for a given Namada chain is:
+
+- A governance proposal should be held to agree on a block height `h` at which
+  to launch the Ethereum bridge by means of a hard fork.
+- If the proposal passes, the Namada chain must halt after finalizing block
+  `h-1`. This requires validators to configure their nodes in advance to halt
+  at that height.
+- The [Ethereum bridge smart contracts](./ethereum_smart_contracts.md) are
+  deployed to the relevant EVM chain, with the active validator set at block
+  height `h` as the initial validator set that controls the bridge.
+- Details are published so that the deployed contracts can be verified by anyone
+  who wishes to do so.
+- If active validators for block height `h` regard the deployment as valid, the
+  chain should be restarted with a new genesis file that specifies
+  `eth_bridge_proxy_address` as the Ethereum address of the proxy contract.
+
+At this point, the bridge is launched and it may start being used. Validators'
+ledger nodes will immediately and automatically coordinate in order to craft the
+first validator set update protocol transaction.
+
+## Facets
+
+### Governance proposal
+
+The governance proposal can be freeform and simply indicate what the value of
+`h` should be. Validators should then configure their nodes to halt at this
+height. The `grace_epoch` is arbitrary as there is no code to be executed as
+part of the proposal; instead, validators must take action manually as soon as
+the proposal passes. The block height `h` must be in an epoch that is strictly
+greater than `voting_end_epoch`.
+
+### Value for launch height `h`
+
+The active validator set at the launch height chosen for starting the Ethereum
+bridge will have the extra responsibility of restarting the chain if they
+consider the deployed smart contracts as valid. For this reason, the validator
+set at this height must be known in advance of the governance proposal
+resolving, and a channel set up for offchain communication and co-ordination of
+the chain restart. In practice, this means the governance proposal to launch the
+chain should commit to doing so within an epoch of passing, so that the
+validator set is definitely known in advance.
+
+### Deployer
+
+Once the smart contracts are fully deployed, only the active validator set for
+block height `h` should have control of the contracts, so in theory anyone could
+carry out the Ethereum bridge smart contract deployment.
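Tying back to the overview: since both governance parameters start out as the zero address, a node can tell whether the bridge has been launched with a simple comparison. A sketch, using a hypothetical helper that is not part of the actual codebase:

```rust
/// Hypothetical helper: the bridge counts as launched once the
/// `eth_bridge_proxy_address` governance parameter is no longer the zero
/// Ethereum address.
const ZERO_ETH_ADDRESS: [u8; 20] = [0u8; 20];

fn bridge_is_launched(eth_bridge_proxy_address: &[u8; 20]) -> bool {
    eth_bridge_proxy_address != &ZERO_ETH_ADDRESS
}
```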
+
+### Backing out of Ethereum bridge launch
+
+If for some reason the validity of the smart contract deployment cannot be
+agreed upon by the validators who will be responsible for restarting Namada, it
+must remain possible to restart the chain with the Ethereum bridge still not
+enabled.
+
+## Example
+
+In this example, all epochs are assumed to be `100` blocks long, and the active
+validator set does not change at any point.
+
+- A governance proposal is made to launch the Ethereum bridge at height `h =
+  3400`, i.e. the first block of epoch `34`.
+
+```json
+{
+    "content": {
+        "title": "Launch the Ethereum bridge",
+        "authors": "hello@heliax.dev",
+        "discussions-to": "hello@heliax.dev",
+        "created": "2023-01-01T08:00:00Z",
+        "license": "Unlicense",
+        "abstract": "Halt the chain and launch the Ethereum bridge at Namada block height 3400",
+        "motivation": ""
+    },
+    "author": "hello@heliax.dev",
+    "voting_start_epoch": 30,
+    "voting_end_epoch": 33,
+    "grace_epoch": 33
+}
+```
+
+- The governance proposal passes at block `3300` (the first block of epoch `33`)
+
+- Validators for epoch `33` manually configure their nodes to halt after having
+  finalized block `3399`, before that block is reached
+
+- The chain halts after having finalized block `3399` (the last block of epoch
+  `33`)
+
+- Putative Ethereum bridge smart contracts are deployed at this point, with the
+  proxy contract located at `0x00000000000000000000000000000000DeaDBeef`
+
+- Verification of the Ethereum bridge smart contracts takes place
+
+- Validators coordinate to craft a new genesis file for the chain restart at
+  `3400`, with the governance parameter `eth_bridge_proxy_address` set to
+  `0x00000000000000000000000000000000DeaDBeef` and `eth_bridge_wnam_address` to
+  `0x000000000000000000000000000000000000Cafe`
+
+- The chain restarts at `3400` (the first block of epoch `34`)
+
+- The first ever validator set update (for epoch `35`) becomes possible within a
+  few blocks (e.g. by block `3410`)
+
+- A validator set update for epoch `35` is submitted to the Ethereum bridge
+  smart contracts
diff --git a/documentation/specs/src/interoperability/ethereum-bridge/ethereum_events_attestation.md b/documentation/specs/src/interoperability/ethereum-bridge/ethereum_events_attestation.md
new file mode 100644
index 0000000000..1c8bdd3095
--- /dev/null
+++ b/documentation/specs/src/interoperability/ethereum-bridge/ethereum_events_attestation.md
@@ -0,0 +1,147 @@
+# Ethereum Events Attestation
+
+We want to store events from the smart contracts of our bridge onto Namada. We
+will include events that have been seen by at least one validator, but will not
+act on them until they have been seen by at least 2/3 of voting power.
+
+There will be multiple types of events emitted. Validators should
+ignore improperly formatted events. Raw events from Ethereum are converted to a
+Rust enum type (`EthereumEvent`) by Namada validators before being included
+in vote extensions or stored on chain.
+
+```rust
+pub enum EthereumEvent {
+    // we will have different variants here corresponding to different types
+    // of raw events we receive from Ethereum
+    TransfersToNamada(Vec<TransferToNamada>)
+    // ...
+}
+```
+
+Each event will be stored with a list of the validators that have ever seen it
+as well as the fraction of total voting power that has ever seen it.
+Once an event has been seen by 2/3 of voting power, it is locked into a
+`seen` state, and acted upon.
+
+There is no adjustment across epoch boundaries - e.g. if an event is seen by 1/3
+of voting power in epoch n, then seen by a different 1/3 of voting power in
+epoch m>n, the event will be considered `seen` in total. Validators may never
+vote more than once for a given event.
+
+## Minimum confirmations
+There will be a protocol-specified minimum number of confirmations that events
+must reach on the Ethereum chain, before validators can vote to include them
+on Namada. This minimum number of confirmations will be changeable via
+governance.
+
+`TransferToNamada` events may include a custom minimum number of
+confirmations, which must be at least the protocol-specified minimum number of
+confirmations but is initially set to __100__.
+
+Validators must not vote to include events that have not met the required
+number of confirmations. Voting on unconfirmed events is considered a
+slashable offence.
+
+## Storage
+To make including new events easy, we take the approach of always overwriting
+the state with the new state rather than applying state diffs. The storage
+keys involved are:
+```
+# all values are Borsh-serialized
+/eth_msgs/\$msg_hash/body : EthereumEvent
+/eth_msgs/\$msg_hash/seen_by : BTreeSet<Address>
+/eth_msgs/\$msg_hash/voting_power: (u64, u64) # reduced fraction < 1 e.g. (2, 3)
+/eth_msgs/\$msg_hash/seen: bool
+```
+
+`\$msg_hash` is the SHA256 digest of the Borsh serialization of the relevant
+`EthereumEvent`.
+
+Changes to this `/eth_msgs` storage subspace are only ever made by
+nodes as part of the ledger code based on the aggregate of votes
+by validators for specific events. That is, changes to
+`/eth_msgs` happen
+in block `n` in a deterministic manner based on the votes included in the
+block proposal for block `n`. Depending on the underlying Tendermint
+version, these votes will either be included as vote extensions or as
+protocol transactions.
+
+The `/eth_msgs` storage subspace will belong
+to the `EthBridge` validity predicate. It should disallow any changes to
+this storage from wasm transactions.
+
+### Including events into storage
+
+For every Namada block proposal, the block proposer should include the votes
+for events from other validators into their proposal. If the underlying
+Tendermint version supports vote extensions, consensus invariants guarantee
+that a quorum of votes from the previous block height can be included.
+Otherwise, validators can only submit votes by broadcasting protocol
+transactions, which comes with fewer guarantees (i.e. no consensus finality).
+
+The vote of a validator should include the events of the Ethereum blocks they
+have seen via their full node such that:
+1. It's correctly formatted.
+2. It's reached the required number of confirmations on the Ethereum chain.
+
+Each event that a validator is voting to include must be individually signed by
+them. If the validator is not voting to include any events, they must still
+provide a signed empty vector of events to indicate this.
+
+The votes will be a Borsh-serialization of something like
+the following.
+```rust
+/// This struct will be created and signed over by each
+/// active validator, to be included as a vote extension at the end of a
+/// Tendermint PreCommit phase or as Protocol Tx.
+pub struct Vext {
+    /// The block height for which this [`Vext`] was made.
+    pub block_height: BlockHeight,
+    /// The address of the signing validator
+    pub validator_addr: Address,
+    /// The new ethereum events seen. These should be
+    /// deterministically ordered.
+    pub ethereum_events: Vec<EthereumEvent>,
+}
+```
+
+These votes will be given to the next block proposer who will
+aggregate those that it can verify and will inject a signed protocol
+transaction into their proposal.
+
+Validators will check this transaction and the validity of the new votes as
+part of `ProcessProposal`; this includes checking:
+- signatures
+- that votes are really from active validators
+- the calculation of backed voting power
+
+If vote extensions are supported, it is also checked that each vote extension
+came from the previous round, requiring validators to sign over the Namada block
+height with their vote extension. Signing over the block height also acts as
+a replay protection mechanism.
+
+Furthermore, the vote extensions included by the block proposer should have
+a quorum of the total voting power of the epoch of the block height behind
+them. Otherwise the block proposer would not have passed the `FinalizeBlock`
+phase of the last round of the last block.
+
+These checks are to prevent censorship
+of events from validators by the block proposer. If vote extensions are not
+enabled, unfortunately these checks cannot be made.
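To make the "backed voting power" calculation concrete, here is a sketch using the `FractionalVotingPower` type added in this diff (the module path is assumed from the diff's file layout, and the surrounding aggregation logic is illustrative only):

```rust
// Path assumed from core/src/types/voting_power.rs in this diff.
use namada_core::types::voting_power::FractionalVotingPower;

/// Sketch: sum the fractional powers of all validators that voted for an
/// event and check the threshold (taken here as strictly more than 2/3,
/// per the quorum definition in the proofs section).
fn is_seen<'a>(
    votes: impl IntoIterator<Item = &'a FractionalVotingPower>,
) -> bool {
    let total: FractionalVotingPower = votes.into_iter().cloned().sum();
    total > FractionalVotingPower::TWO_THIRDS
}
```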
+
+In `FinalizeBlock`, we derive a second transaction (the "state update"
+transaction) from the vote aggregation that:
+- calculates the required changes to `/eth_msgs` storage and applies them
+- acts on any `/eth_msgs/\$msg_hash` where `seen` is going from `false` to `true`
+  (e.g. appropriately minting wrapped Ethereum assets)
+
+This state update transaction will not be recorded on chain but will be
+deterministically derived from the protocol transaction including the
+aggregation of votes, which is recorded on chain. All ledger nodes will
+derive and apply the appropriate state changes to their own local
+blockchain storage.
+
+The value of `/eth_msgs/\$msg_hash/seen` will also indicate if the event
+has been acted upon on the Namada side. The appropriate transfers of tokens
+to the given user will be included on chain free of charge and require no
+additional actions from the end user.
\ No newline at end of file
diff --git a/documentation/specs/src/interoperability/ethereum-bridge/ethereum_smart_contracts.md b/documentation/specs/src/interoperability/ethereum-bridge/ethereum_smart_contracts.md
new file mode 100644
index 0000000000..669122b420
--- /dev/null
+++ b/documentation/specs/src/interoperability/ethereum-bridge/ethereum_smart_contracts.md
@@ -0,0 +1,182 @@
+# Ethereum Smart Contracts
+
+## Contracts
+
+There are five smart contracts that make up an Ethereum bridge deployment.
+
+- Proxy
+- Bridge
+- Governance
+- Vault
+- wNAM
+
+### Proxy
+
+The _Proxy_ contract serves as dumb storage for holding the addresses of other
+contracts, specifically the _Governance_ contract, the _Vault_ contract and the
+current _Bridge_ contract. Once deployed, it is modifiable only by the
+_Governance_ contract, to update which contract is the current _Bridge_
+contract.
+
+The _Proxy_ contract is fixed forever once the bridge has been deployed.
+
+### Bridge
+
+The _Bridge_ contract is the only contract that unprivileged users of the bridge
+may interact with. It provides methods for transferring ERC20s to Namada
+(holding them in escrow in the _Vault_), as well as releasing escrowed ERC20s
+from the _Vault_ for transfers made from Namada to Ethereum. It holds a
+whitelist of ERC20s that may cross the bridge, and this whitelist may be updated
+by the _Governance_ contract.
+
+### Governance
+
+The _Governance_ contract may "upgrade" the bridge by updating the _Proxy_
+contract to point to a new _Bridge_ contract and/or a new _Governance_ contract.
+It may also withdraw all funds from the _Vault_ to any specified Ethereum
+address, if a quorum of validators choose to do so.
+
+### wNAM
+
+The _wNAM_ contract is a simple ERC20 token with a fixed supply, which is all
+minted when the bridge is first deployed. After initial deployment, the entire
+supply of _wNAM_ belongs to the _Vault_ contract. As NAM is transferred from
+Namada to Ethereum, wNAM may be released from the _Vault_ by the _Bridge_.
+
+The _wNAM_ contract is fixed forever once the bridge has been deployed.
+
+### Vault
+
+The _Vault_ contract holds in escrow any ERC20 tokens that have been sent over
+the bridge to Namada, as well as a supply of _wNAM_ ERC20s to represent NAM that
+has been sent from Namada to Ethereum. Funds held by the _Vault_ may only be
+spent by the current _Bridge_ contract. When ERC20 tokens are transferred
+from Ethereum to Namada, they must be deposited to the _Vault_ via the _Bridge_
+contract.
+
+The _Vault_ contract is fixed forever once the bridge has been deployed.
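The contract layout above can be modeled schematically (in Rust here for consistency with the rest of this diff, though the real contracts are Ethereum smart contracts; names and shapes are illustrative, not the deployed interface):

```rust
/// Schematic model of the Proxy contract's storage: three addresses, of
/// which only the bridge entry is ever rewritten, and only by Governance.
struct Proxy {
    governance: [u8; 20],
    vault: [u8; 20],
    bridge: [u8; 20],
}

impl Proxy {
    /// Only the Governance contract may point the proxy at a new Bridge.
    fn upgrade_bridge(&mut self, caller: [u8; 20], new_bridge: [u8; 20]) {
        assert_eq!(caller, self.governance, "only Governance may upgrade");
        self.bridge = new_bridge;
    }
}
```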
+
+## Namada-side configuration
+
+When an account on Namada becomes a validator, they must provide two Ethereum
+secp256k1 keys:
+
+- the bridge key - a hot key for normal operations
+- the governance key - a cold key for exceptional operations, like emergency
+  withdrawal from the bridge
+
+These keys are used to control the bridge smart contracts, via signing of
+messages. Validators should be challenged periodically to prove they still retain
+knowledge of their governance key, which is not regularly used.
+
+## Deployment
+
+The contracts should be deployable by anyone to any EVM chain using an automated
+script. The following configuration should be agreed up front by Namada
+governance before deployment:
+
+- details of the initial active validator set that will control the bridge -
+  specifically, for each validator:
+  - their hot Ethereum address
+  - their cold Ethereum address
+  - their voting power on Namada for the epoch when the bridge will launch
+- the total supply of the wNAM ERC20 token, which will represent Namada-native
+  NAM on the EVM chain
+- an initial whitelist of ERC20 tokens that may cross the bridge from Ethereum
+  to Namada - specifically, for each whitelisted ERC20:
+  - the Ethereum address of the ERC20 contract
+  - a cap on the total amount that may cross the bridge, in units of ERC20
+
+After a deployment has finished successfully, the deployer must not have any
+privileged control of any of the contracts deployed. Any privileged actions must
+only be possible via a message signed by a validator set that the smart
+contracts are storing details of.
+
+## Communication
+
+### From Ethereum to Namada
+
+A Namada chain's validators are configured to listen to events emitted by the
+smart contracts pointed to by the _Proxy_ contract. The address of the _Proxy_
+contract is set in a governance parameter in Namada storage. Namada validators
+treat emitted events as authoritative and take action on them. Namada also knows
+the address of the _wNAM_ ERC20 contract via a governance parameter, and treats
+transfers of this ERC20 to Namada as an indication to release native NAM from
+the `#EthBridgeEscrow` account on Namada, rather than to mint a wrapped ERC20 as
+is the case with all other ERC20s.
+
+### From Namada to Ethereum
+
+At any time, the _Governance_ and _Bridge_ contracts must store:
+
+- a hash of the current Namada epoch's active validator set
+- a hash of another epoch's active validator set. When the bridge is first
+  deployed, this will also be the current Namada epoch's active validator set,
+  but after the first validator set update is submitted to the _Governance_
+  smart contract, this hash will always be an adjacent Namada epoch's active
+  validator set i.e. either the previous epoch's, or the next epoch's
+
+In the case of the _Governance_ contract, these are hashes of a map of
+validators' _cold_ key addresses to their voting powers, while for the _Bridge_
+contract it is hashes of a map of validators' _hot_ key addresses to their
+voting powers. Namada validators may post signatures on chain of relevant
+messages to be relayed to the Ethereum bridge smart contracts (e.g. validator
+set updates, pending transfers, etc.). Methods of the Ethereum bridge smart
+contracts should generally accept:
+
+- some message
+- full details of some active validator set (i.e. relevant Ethereum addresses +
+  voting powers)
+- signatures over the message by validators from this active validator set
+
+Given this data, anyone should be able to make the relevant Ethereum smart
+contract method call, if they are willing to pay the Ethereum gas. A call is
+then authorized to happen if:
+
+- The active validator set specified in the call hashes to *either* of the
+  validator set hashes stored in the smart contract
+- A quorum (i.e. more than 2/3 by voting power) of the signatures over the
+  message are valid
+
+### Validator set updates
+
+Initial deployment aside, at the beginning of each epoch, the smart contracts
+will contain details of the current epoch's validator set and the previous
+epoch's validator set. Namada validators must endeavor to sign details of the
+next epoch's validator set and post them on the Namada chain in a protocol
+transaction. Details of the next epoch's validator set and a quorum of
+signatures over it by validators from the current epoch's validator set must
+then be relayed to the _Governance_ contract before the end of the epoch, which
+will update both the _Governance_ and _Bridge_ smart contracts to have the hash
+of the next epoch's validator set rather than the previous epoch's validator
+set. This should happen before the current Namada epoch ends. If this does not
+happen, then the Namada chain must either halt or not progress to the next
+epoch, to avoid losing control of the bridge.
+
+When a validator set update is submitted, the hashes for the oldest validator
+set are effectively "evicted" from the _Governance_ and _Bridge_ smart
+contracts. At that point, messages signed by that evicted validator set will no
+longer be accepted by the bridge.
+
+#### Example flow
+
+- Namada epoch `10` begins. Currently, the _Governance_ contract knows the
+  hashes of the validator sets for epochs `9` and `10`, as does the _Bridge_
+  contract.
+- Validators for epoch `10` post signatures over the hash of details of the
+  validator set for epoch `11` to Namada as protocol transactions
+- A point is reached during epoch `10` at which a quorum of such signatures is
+  present on the Namada chain
+- A relayer submits a validator set update for epoch `11` to _Governance_, using
+  a quorum of signatures from the Namada chain
+- The _Governance_ and _Bridge_ contracts now know the hashes of the validator
+  sets for epochs `10` and `11`, and will accept messages signed by either of
+  them. It will no longer accept messages signed by the validator set for epoch
+  `9`.
+- Namada progresses to epoch `11`, and the flow repeats
+
+NB: the flow for when the bridge has just launched is similar, except the
+contracts know the details of only one epoch's validator set - the launch
+epoch's. E.g. if the bridge launches at epoch `10`, then initially the contracts
+know the hash only for epoch `10` and not epochs `10` and `11`, until the first
+validator set update has been submitted.
\ No newline at end of file
diff --git a/documentation/specs/src/interoperability/ethereum-bridge/proofs.md b/documentation/specs/src/interoperability/ethereum-bridge/proofs.md
new file mode 100644
index 0000000000..5c67147d68
--- /dev/null
+++ b/documentation/specs/src/interoperability/ethereum-bridge/proofs.md
@@ -0,0 +1,101 @@
+# Proofs
+
+A proof for the bridge is a quorum of signatures by a valid validator set. A
+bridge header is a proof attached to a message understandable to the
+Ethereum smart contracts. For transferring value to Ethereum, a proof is a
+signed Merkle tree root and inclusion proofs of asset transfer messages
+understandable to the Ethereum smart contracts, as described in the section on
+[batching](transfers_to_ethereum.md#batching).
+
+A message for transferring value to Ethereum is a `TransferToEthereum`
+instance as described
+[here](./transfers_to_ethereum.md#bridge-pool-validity-predicate).
+
+Additionally, when the validator set changes, the smart contracts on
+Ethereum must be updated so that they can continue to recognize valid proofs.
+Since the Ethereum smart contract should accept any bridge
+header signed by 2/3 of the staking validators, it needs up-to-date
+knowledge of:
+- The current validators' public keys
+- The current stake of each validator
+
+This means that by the end of every Namada epoch, a special transaction must be
+sent to the Ethereum smart contracts detailing the new public keys and stake
+of the new validator set. This message must also be signed by at least 2/3
+of the current validators as a "transfer of power".
+
+If vote extensions are available, a fully crafted transfer of power message
+will be made available on-chain. Otherwise, this message must be crafted
+offline by aggregating the protocol txs from validators in which they sign
+over the new validator set.
+
+If vote extensions are available, this signed data can be constructed
+using them. Otherwise, validators must send protocol txs to be included on
+the ledger. Once a quorum exists on chain, they can be aggregated into a
+single message that can be relayed to Ethereum. Signing an
+invalid validator transition set will be considered a slashable offense.
+
+Due to asynchronicity concerns, this message should be submitted well in
+advance of the actual epoch change. It should happen at the beginning of each
+new epoch. Bridge headers to Ethereum should include the current Namada epoch
+so that the smart contract knows how to verify the headers. In short, there
+is a pipelining mechanism in the smart contract - the active validators for epoch `n` submit details of the active validator set for epoch `n+1`.
+
+Such a message is not prompted by any user transaction and thus will have
+to be carried out by a _bridge relayer_. Once the necessary data to
+construct the transfer of power message is on chain, any time afterwards a
+Namada bridge process may take it to craft the appropriate header to the
+Ethereum smart contracts.
+
+The details on bridge relayers are below in the corresponding section.
+
+Signing incorrect headers is considered a slashable offense. Anyone witnessing
+an incorrect header that is signed may submit a complaint (a type of transaction)
+to initiate slashing of the validator who made the signature.
+
+## Namada Bridge Relayers
+
+Validator changes must be turned into a message that can be communicated to
+smart contracts on Ethereum. These smart contracts need this information
+to verify proofs of actions taken on Namada.
+
+Since this is protocol level information, it is not user prompted and thus
+should not be the responsibility of any user to submit such a transaction.
+However, any user may choose to submit this transaction anyway.
+
+This necessitates a Namada node whose job it is to submit these transactions on
+Ethereum by the conclusion of each Namada epoch. This node is called the
+__bridge relayer__. In theory, since this message is publicly available
+on the blockchain, anyone can submit this transaction, but only the
+bridge relayer will be directly compensated by Namada.
+
+The bridge relayer will be chosen to be the proposer of the first block of the
+new epoch. Anyone else may relay this message, but must pay for the fees out of
+their own pocket.
+
+All Namada validators will have an option to serve as bridge relayer and
+the Namada ledger will include a process that does the relaying. Since all
+Namada validators are running Ethereum full nodes, they can monitor
+that the message was relayed correctly by the bridge relayer.
+
+If the Ethereum event spawned by relaying their message is accepted onto
+Namada via Ethereum state inclusion, new NAM tokens will be minted to
+reward them. The reward amount shall be a protocol parameter that can be
+changed via governance. It should be high enough to cover necessary gas fees.
+
+### Recovering from an update failure
+
+If vote extensions are not available, we cannot guarantee that a quorum of
+validator signatures can be gathered for the message that updates the
+validator set before the epoch ends.
+
+If a significant number of validators become inactive in the next epoch, we
+need a means to complete the validator set update. Until this is done, the
+bridge will halt.
+
+In this case, the validators from that epoch will need to craft a
+transaction with a quorum of signatures offline and submit it on-chain. This
+transaction should include the validator set update.
+
+The only way this is impossible is if more than 1/3 of the validators by
+stake from that epoch delete their Ethereum keys, which is extremely unlikely.
diff --git a/documentation/specs/src/interoperability/ethereum-bridge/security.md b/documentation/specs/src/interoperability/ethereum-bridge/security.md
new file mode 100644
index 0000000000..0b022b2e0e
--- /dev/null
+++ b/documentation/specs/src/interoperability/ethereum-bridge/security.md
@@ -0,0 +1,10 @@
+# Security
+
+On Namada, the validators are full nodes of Ethereum and their stake is also
+accounting for security of the bridge. If they carry out a forking attack
+on Namada to steal locked tokens of Ethereum their stake will be slashed on Namada.
+On the Ethereum side, we will add a limit to the amount of assets that can be
+locked to limit the damage a forking attack on Namada can do. To make an attack
+more cumbersome we will also add a limit on how fast wrapped Ethereum assets can
+be redeemed from Namada. This will not add more security, but rather make the
+attack more inconvenient.
diff --git a/documentation/specs/src/interoperability/ethereum-bridge/transfers_to_ethereum.md b/documentation/specs/src/interoperability/ethereum-bridge/transfers_to_ethereum.md
new file mode 100644
index 0000000000..83611a656b
--- /dev/null
+++ b/documentation/specs/src/interoperability/ethereum-bridge/transfers_to_ethereum.md
@@ -0,0 +1,133 @@
+# Transferring from Namada to Ethereum
+
+Moving assets from Namada to Ethereum will not be automatic, as opposed to the
+movement of value in the opposite direction. Instead, users must send an
+appropriate transaction to Namada to initiate a transfer across the bridge
+to Ethereum. Once this transaction is approved, a ["proof"](proofs.md), or
+the parts necessary to create a proof, will be created and posted on Namada.
+
+It is incumbent on the end user to request an appropriate proof
+of the transaction. This proof must be submitted to the appropriate Ethereum smart
+contract by the user to redeem Ethereum assets / mint wrapped assets. This also
+means all Ethereum gas costs are the responsibility of the end user.
+
+A relayer binary will be developed to aid users in accessing the proofs
+generated by Namada validators as well as posting this proof to Ethereum. It
+will also aid in batching transactions.
+
+## Moving value to Ethereum
+
+To redeem wrapped Ethereum assets, a user should make a transaction to burn
+their wrapped tokens, which the `#EthBridge` validity predicate will accept.
+For sending NAM over the bridge, a user should send their NAM to
+`#EthBridgeEscrow`. In both cases, it's important that the user also adds a
+`PendingTransfer` to the [Bridge Pool](#bridge-pool-validity-predicate).
+
+## Batching
+
+Ethereum gas fees make it prohibitively expensive to submit
+the proof for a single transaction over the bridge. Instead, it is typically
+more economical to submit proofs of many transactions in bulk. This batching
+is described in this section.
+
+A pool of transfers from Namada to Ethereum will be kept by Namada. Every
+transaction to Ethereum that Namada validators approve will be added to this
+pool. We call this the _Bridge Pool_.
+
+The Bridge Pool should be thought of as a sort of mempool. When users who
+wish to move assets to Ethereum submit their transactions, they will pay some
+additional amount of NAM (of their choosing) as a way of covering the gas
+costs on Ethereum. Namada validators will hold these fees in a Bridge Pool
+Escrow.
+
+When a batch of transactions from the Bridge Pool is submitted by a user to
+Ethereum, Namada validators will receive notifications via their full nodes.
+They will then pay out the fees for each submitted transaction to the user who
+relayed these transactions (still in NAM). These will be paid out from the
+Bridge Pool Escrow.
+
+The idea is that users will only relay transactions from the Bridge Pool
+that make economic sense. This prevents DoS attacks by underpaying fees as
+well as obviating the need for Ethereum gas price oracles. It also means
+that transfers to Ethereum are not ordered, preventing other attack vectors.
+
+The Bridge Pool will be organized as a Merkle tree. Every time it is updated,
+the root of the tree must be signed by a quorum of validators. When a user
+wishes to construct a batch of transactions to relay to Ethereum, they
+include the signed tree root and inclusion proofs for the subset of the pool
+they are relaying. This can be easily verified by the Ethereum smart contracts.
+
+If vote extensions are available, these are used to collect the signatures
+over the Merkle tree root. If they are not, these must be submitted as protocol
+transactions, introducing latency to the pool. A user wishing to relay will
+need to wait until a Merkle tree root is signed for a tree that
+includes all the transactions they wish to relay.
+
+The Ethereum smart contracts won't keep track of this signed Merkle root.
+Instead, part of the proof of correct batching is submitting a root to the
+contracts that is signed by a quorum of validators. Since the smart contracts
+can trust such a signed root, they can then use the root to verify inclusion
+proofs.
+
+### Bridge Pool validity predicate
+
+The Bridge Pool will have associated storage under the control of a native
+validity predicate. The storage layout looks as follows.
+
+```
+# all values are Borsh-serialized
+/pending_transfers: Vec<PendingTransfer>
+/signed_root: Signed<MerkleRoot>
+```
+
+The pending transfers are instances of the following type:
+```rust
+pub struct TransferToEthereum {
+    /// The type of token
+    pub asset: EthAddress,
+    /// The recipient address
+    pub recipient: EthAddress,
+    /// The amount to be transferred
+    pub amount: Amount,
+    /// a nonce for replay protection
+    pub nonce: u64,
+}
+
+pub struct PendingTransfer {
+    /// The message to send to Ethereum to
+    /// complete the transfer
+    pub transfer: TransferToEthereum,
+    /// The gas fees paid by the user sending
+    /// this transfer
+    pub gas_fee: GasFee,
+}
+
+pub struct GasFee {
+    /// The amount of gas fees (in NAM)
+    /// paid by the user sending this transfer
+    pub amount: Amount,
+    /// The address of the account paying the fees
+    pub payer: Address,
+}
+```
+When a user initiates a transfer, their transaction should include wasm
+to craft a `PendingTransfer` and append it to the pool in storage as well as
+send the relevant gas fees into the Bridge Pool's escrow. This will be
+validated by the Bridge Pool VP.
+
+The signed Merkle root is only modifiable by validators. The Merkle tree
+only consists of the `TransferToEthereum` messages as Ethereum does not need
+information about the gas fees paid on Namada.
+
+If vote extensions are not available, this signed root may lag behind the
+list of pending transactions. However, eventually every pending transaction
+should either be covered by the root or time out.
+
+## Replay Protection and timeouts
+
+It is important that nonces are used to prevent copies of the same
+transaction being submitted multiple times. Since we do not want to enforce
+an order on the transactions, these nonces should land in a range. As a
+consequence of this, it is possible that transactions in the Bridge Pool will
+time out. Transactions that time out should revert the state changes on
+Namada, including refunding the fees paid in.
diff --git a/documentation/specs/src/interoperability/ethereum-bridge/transfers_to_namada.md b/documentation/specs/src/interoperability/ethereum-bridge/transfers_to_namada.md
new file mode 100644
index 0000000000..d45abbc8cf
--- /dev/null
+++ b/documentation/specs/src/interoperability/ethereum-bridge/transfers_to_namada.md
@@ -0,0 +1,52 @@
+# Transferring assets from Ethereum to Namada
+
+In order to facilitate transferring assets from Ethereum to Namada, there
+will be two internal accounts with associated native validity predicates:
+
+- `#EthBridge` - Controls the `/eth_msgs/` [storage](ethereum_events_attestation.md#storage)
+  and ledgers of balances for wrapped Ethereum assets (ERC20 tokens)
+  structured in a ["multitoken"](https://github.com/anoma/anoma/issues/1102) hierarchy
+- `#EthBridgeEscrow` which will hold in escrow wrapped Namada tokens which have
+  been sent to Ethereum.
+
+#### Wrapped ERC20
+
+If an ERC20 token is transferred to Namada, once the associated
+`TransferToNamada` Ethereum event is included into Namada, validators mint
+the appropriate amount to the corresponding multitoken balance key for
+the receiver, or release the escrowed native Namada token.
+
+```rust
+pub struct EthAddress(pub [u8; 20]);
+
+/// An event transferring some kind of value from Ethereum to Namada
+pub struct TransferToNamada {
+    /// Quantity of ether in the transfer
+    pub amount: Amount,
+    /// Address on Ethereum of the asset
+    pub asset: EthereumAsset,
+    /// The Namada address receiving wrapped assets on Namada
+    pub receiver: Address,
+}
+```
+
+##### Example
+
+For 10 DAI i.e. ERC20([0x6b175474e89094c44da98b954eedeac495271d0f](https://etherscan.io/token/0x6b175474e89094c44da98b954eedeac495271d0f)) to `atest1v4ehgw36xue5xvf5xvuyzvpjx5un2v3k8qeyvd3cxdqns32p89rrxd6xx9zngvpegccnzs699rdnnt`
+```
+#EthBridge
+    /ERC20
+        /0x6b175474e89094c44da98b954eedeac495271d0f
+            /balance
+                /atest1v4ehgw36xue5xvf5xvuyzvpjx5un2v3k8qeyvd3cxdqns32p89rrxd6xx9zngvpegccnzs699rdnnt
+                += 10
+```
+
+#### Namada tokens
+
+Any wrapped Namada tokens being redeemed from Ethereum must have an
+equivalent amount of the native token held in escrow by `#EthBridgeEscrow`.
+Once the associated `TransferToNamada` Ethereum event is included into
+Namada, validators should simply make a transfer from `#EthBridgeEscrow` to
+the `receiver` for the appropriate amount and asset.
diff --git a/encoding_spec/Cargo.toml b/encoding_spec/Cargo.toml
index 2fc6c2cc18..a493ea3dc9 100644
--- a/encoding_spec/Cargo.toml
+++ b/encoding_spec/Cargo.toml
@@ -6,7 +6,7 @@ license = "GPL-3.0"
 name = "namada_encoding_spec"
 readme = "../README.md"
 resolver = "2"
-version = "0.12.0"
+version = "0.12.1"
 
 [features]
 default = ["abciplus"]
diff --git a/ethereum_bridge/Cargo.toml b/ethereum_bridge/Cargo.toml
new file mode 100644
index 0000000000..51acdff834
--- /dev/null
+++ b/ethereum_bridge/Cargo.toml
@@ -0,0 +1,47 @@
+[package]
+authors = ["Heliax AG <hello@heliax.dev>"]
+edition = "2021"
+license = "GPL-3.0"
+name = "namada_ethereum_bridge"
+resolver = "2"
+version = "0.11.0"
+
+[features]
+default = ["abciplus"]
+
+abcipp = [
+  "tendermint-abcipp",
+  "tendermint-rpc-abcipp",
+  "tendermint-proto-abcipp",
+  "namada_core/abcipp",
+  "namada_proof_of_stake/abcipp"
+]
+
+abciplus = [
+  "tendermint",
+  "tendermint-rpc",
+  "tendermint-proto",
+  "namada_core/abciplus",
+  "namada_core/tendermint",
+  "namada_proof_of_stake/abciplus",
+]
+
+[dependencies]
+namada_core = {path = "../core", default-features = false, features = ["secp256k1-sign-verify", "ferveo-tpke"]}
+namada_proof_of_stake = {path = "../proof_of_stake", default-features = false}
+borsh = "0.9.0"
+eyre = "0.6.8"
+itertools = "0.10.0"
+serde = {version = "1.0.125", features = ["derive"]}
+serde_json = "1.0.62"
+tendermint-abcipp = {package = "tendermint", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true}
+tendermint-rpc-abcipp = {package = "tendermint-rpc", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", features = ["http-client"], optional = true}
+tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true}
+tendermint = {version = "0.23.6", optional = true}
+tendermint-rpc = {version = "0.23.6", features = ["http-client"], optional = true}
+tendermint-proto = {version = "0.23.6", optional = true}
+tracing = "0.1.30"
+
+[dev-dependencies]
+assert_matches = "1.5.0"
+toml = "0.5.8"
\ No newline at end of file
diff --git a/ethereum_bridge/src/bridge_pool_vp.rs b/ethereum_bridge/src/bridge_pool_vp.rs
new file mode 100644
index 0000000000..771d6eb172
--- /dev/null
+++ b/ethereum_bridge/src/bridge_pool_vp.rs
@@ -0,0 +1,28 @@
+use borsh::BorshSerialize;
+use namada_core::ledger::eth_bridge::storage::bridge_pool::BRIDGE_POOL_ADDRESS;
+use namada_core::ledger::storage::{DBIter, Storage, StorageHasher, DB};
+use namada_core::types::address::nam;
+use namada_core::types::token::{balance_key, Amount};
+
+/// Initialize the storage owned by the Bridge Pool VP.
+///
+/// This means that the amount of escrowed gas fees is
+/// initialized to 0.
+pub fn init_storage<D, H>(storage: &mut Storage<D, H>)
+where
+    D: DB + for<'iter> DBIter<'iter>,
+    H: StorageHasher,
+{
+    let escrow_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS);
+    storage
+        .write(
+            &escrow_key,
+            Amount::default()
+                .try_to_vec()
+                .expect("Serializing an amount shouldn't fail."),
+        )
+        .expect(
+            "Initializing the escrow balance of the Bridge Pool VP shouldn't \
+             fail.",
+        );
+}
diff --git a/ethereum_bridge/src/lib.rs b/ethereum_bridge/src/lib.rs
new file mode 100644
index 0000000000..8a1e04d02b
--- /dev/null
+++ b/ethereum_bridge/src/lib.rs
@@ -0,0 +1,5 @@
+pub mod bridge_pool_vp;
+pub mod parameters;
+pub mod protocol;
+pub mod storage;
+pub mod vp;
diff --git a/ethereum_bridge/src/parameters.rs b/ethereum_bridge/src/parameters.rs
new file mode 100644
index 0000000000..30ae43a37f
--- /dev/null
+++ b/ethereum_bridge/src/parameters.rs
@@ -0,0 +1,197 @@
+//! Parameters for configuring the Ethereum bridge
+use std::num::NonZeroU64;
+
+use borsh::{BorshDeserialize, BorshSerialize};
+use namada_core::ledger::storage;
+use namada_core::ledger::storage::types::encode;
+use namada_core::ledger::storage::Storage;
+use namada_core::types::ethereum_events::EthAddress;
+use serde::{Deserialize, Serialize};
+
+use crate::{bridge_pool_vp, storage as bridge_storage, vp};
+
+/// Represents a configuration value for the minimum number of
+/// confirmations an Ethereum event must reach before it can be acted on.
+#[derive(
+    Clone,
+    Copy,
+    Eq,
+    PartialEq,
+    Debug,
+    Deserialize,
+    Serialize,
+    BorshSerialize,
+    BorshDeserialize,
+)]
+#[repr(transparent)]
+pub struct MinimumConfirmations(NonZeroU64);
+
+impl Default for MinimumConfirmations {
+    fn default() -> Self {
+        // SAFETY: The only way the API contract of `NonZeroU64` can be
+        // violated is if we construct values of this type using 0 as
+        // argument.
+        Self(unsafe { NonZeroU64::new_unchecked(100) })
+    }
+}
+
+/// Represents a configuration value for the version of a contract that can be
+/// upgraded. Starts from 1.
+#[derive(
+    Clone,
+    Copy,
+    Eq,
+    PartialEq,
+    Debug,
+    Deserialize,
+    Serialize,
+    BorshSerialize,
+    BorshDeserialize,
+)]
+#[repr(transparent)]
+pub struct ContractVersion(NonZeroU64);
+
+impl Default for ContractVersion {
+    fn default() -> Self {
+        // SAFETY: The only way the API contract of `NonZeroU64` can be
+        // violated is if we construct values of this type using 0 as
+        // argument.
+        Self(unsafe { NonZeroU64::new_unchecked(1) })
+    }
+}
+
+/// Represents an Ethereum contract that may be upgraded.
+#[derive(
+    Clone,
+    Debug,
+    Eq,
+    PartialEq,
+    Deserialize,
+    Serialize,
+    BorshSerialize,
+    BorshDeserialize,
+)]
+pub struct UpgradeableContract {
+    /// The Ethereum address of the contract.
+    pub address: EthAddress,
+    /// The version of the contract. Starts from 1.
+    pub version: ContractVersion,
+}
+
+/// Represents all the Ethereum contracts that need to be directly known by
+/// validators.
+#[derive(
+    Clone,
+    Debug,
+    Eq,
+    PartialEq,
+    Deserialize,
+    Serialize,
+    BorshSerialize,
+    BorshDeserialize,
+)]
+pub struct Contracts {
+    /// The Ethereum address of the ERC20 contract that represents this
+    /// chain's native token.
+    pub native_erc20: EthAddress,
+    /// The Ethereum address of the bridge contract.
+    pub bridge: UpgradeableContract,
+    /// The Ethereum address of the governance contract.
+    pub governance: UpgradeableContract,
+}
+
+/// Represents chain parameters for the Ethereum bridge.
+#[derive(
+    Clone,
+    Debug,
+    Eq,
+    PartialEq,
+    Deserialize,
+    Serialize,
+    BorshSerialize,
+    BorshDeserialize,
+)]
+pub struct EthereumBridgeConfig {
+    /// Minimum number of confirmations needed to trust an Ethereum branch.
+    /// This must be at least one.
+    pub min_confirmations: MinimumConfirmations,
+    /// The addresses of the Ethereum contracts that need to be directly known
+    /// by validators.
+    pub contracts: Contracts,
+}
+
+impl EthereumBridgeConfig {
+    /// Initialize the Ethereum bridge parameters in storage.
+    ///
+    /// If these parameters are initialized, the storage subspaces
+    /// for the Ethereum bridge VPs are also initialized.
+    pub fn init_storage<DB, H>(&self, storage: &mut Storage<DB, H>)
+    where
+        DB: storage::DB + for<'iter> storage::DBIter<'iter>,
+        H: storage::traits::StorageHasher,
+    {
+        let Self {
+            min_confirmations,
+            contracts:
+                Contracts {
+                    native_erc20,
+                    bridge,
+                    governance,
+                },
+        } = self;
+        let min_confirmations_key = bridge_storage::min_confirmations_key();
+        let native_erc20_key = bridge_storage::native_erc20_key();
+        let bridge_contract_key = bridge_storage::bridge_contract_key();
+        let governance_contract_key =
+            bridge_storage::governance_contract_key();
+        storage
+            .write(&min_confirmations_key, encode(min_confirmations))
+            .unwrap();
+        storage
+            .write(&native_erc20_key, encode(native_erc20))
+            .unwrap();
+        storage.write(&bridge_contract_key, encode(bridge)).unwrap();
+        storage
+            .write(&governance_contract_key, encode(governance))
+            .unwrap();
+        // Initialize the storage for the Ethereum Bridge VP.
+        vp::init_storage(storage);
+        // Initialize the storage for the Bridge Pool VP.
+        bridge_pool_vp::init_storage(storage);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use eyre::Result;
+    use namada_core::types::ethereum_events::EthAddress;
+
+    use crate::parameters::{
+        ContractVersion, Contracts, EthereumBridgeConfig,
+        MinimumConfirmations, UpgradeableContract,
+    };
+
+    /// Ensure we can serialize and deserialize an [`EthereumBridgeConfig`]
+    /// struct to and from TOML. This can fail if complex fields are ordered
+    /// before simple fields in any of the config structs.
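+    ///
+    /// (The reason ordering matters: a TOML emitter cannot write a simple
+    /// key-value pair after it has started emitting tables, so a scalar
+    /// field declared after a table-valued field would fail to serialize.
+    /// As a rough sketch, the serialized config is expected to look
+    /// something like the following, with the exact encoding of addresses
+    /// left out:)
+    ///
+    /// ```text
+    /// min_confirmations = 100
+    ///
+    /// [contracts]
+    /// native_erc20 = ...
+    ///
+    /// [contracts.bridge]
+    /// version = 1
+    /// ```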
+ #[test] + fn test_round_trip_toml_serde() -> Result<()> { + let config = EthereumBridgeConfig { + min_confirmations: MinimumConfirmations::default(), + contracts: Contracts { + native_erc20: EthAddress([42; 20]), + bridge: UpgradeableContract { + address: EthAddress([23; 20]), + version: ContractVersion::default(), + }, + governance: UpgradeableContract { + address: EthAddress([18; 20]), + version: ContractVersion::default(), + }, + }, + }; + let serialized = toml::to_string(&config)?; + let deserialized: EthereumBridgeConfig = toml::from_str(&serialized)?; + + assert_eq!(config, deserialized); + Ok(()) + } +} diff --git a/ethereum_bridge/src/protocol/mod.rs b/ethereum_bridge/src/protocol/mod.rs new file mode 100644 index 0000000000..0824d7a9cb --- /dev/null +++ b/ethereum_bridge/src/protocol/mod.rs @@ -0,0 +1 @@ +pub mod transactions; diff --git a/ethereum_bridge/src/protocol/transactions/ethereum_events/eth_msgs.rs b/ethereum_bridge/src/protocol/transactions/ethereum_events/eth_msgs.rs new file mode 100644 index 0000000000..df9b9740ee --- /dev/null +++ b/ethereum_bridge/src/protocol/transactions/ethereum_events/eth_msgs.rs @@ -0,0 +1,117 @@ +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use namada_core::types::ethereum_events::EthereumEvent; +use namada_core::types::vote_extensions::ethereum_events::MultiSignedEthEvent; + +use crate::protocol::transactions::votes::{dedupe, Tally, Votes}; + +/// Represents an Ethereum event being seen by some validators +#[derive( + Debug, + Clone, + Ord, + PartialOrd, + PartialEq, + Eq, + Hash, + BorshSerialize, + BorshDeserialize, +)] +pub struct EthMsgUpdate { + /// The event being seen. + pub body: EthereumEvent, + /// New votes for this event. + // NOTE(feature = "abcipp"): This can just become BTreeSet
because + // BlockHeight will always be the previous block + pub seen_by: Votes, +} + +impl From for EthMsgUpdate { + fn from( + MultiSignedEthEvent { event, signers }: MultiSignedEthEvent, + ) -> Self { + Self { + body: event, + seen_by: dedupe(signers), + } + } +} + +/// Represents an event stored under `eth_msgs` +#[derive( + Clone, Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize, BorshSchema, +)] +pub struct EthMsg { + /// The event being stored + pub body: EthereumEvent, + /// Tallying of votes for this event + pub votes: Tally, +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeSet; + + use namada_core::types::address; + use namada_core::types::ethereum_events::testing::{ + arbitrary_nonce, arbitrary_single_transfer, + }; + use namada_core::types::storage::BlockHeight; + + use super::*; + + #[test] + /// Tests [`From`] for [`EthMsgUpdate`] + fn test_from_multi_signed_eth_event_for_eth_msg_update() { + let sole_validator = address::testing::established_address_1(); + let receiver = address::testing::established_address_2(); + let event = arbitrary_single_transfer(arbitrary_nonce(), receiver); + let with_signers = MultiSignedEthEvent { + event: event.clone(), + signers: BTreeSet::from([( + sole_validator.clone(), + BlockHeight(100), + )]), + }; + let expected = EthMsgUpdate { + body: event, + seen_by: Votes::from([(sole_validator, BlockHeight(100))]), + }; + + let update: EthMsgUpdate = with_signers.into(); + + assert_eq!(update, expected); + } + + #[test] + /// Test that `From` for `EthMsgUpdate` does in fact + /// dedupe votes + fn test_from_multi_signed_eth_event_for_eth_msg_update_dedupes() { + let validator_1 = address::testing::established_address_1(); + let validator_2 = address::testing::established_address_2(); + let signers = BTreeSet::from([ + (validator_1.clone(), BlockHeight(100)), + (validator_2.clone(), BlockHeight(200)), + (validator_1, BlockHeight(300)), + (validator_2, BlockHeight(400)), + ]); + + let event = arbitrary_single_transfer( + arbitrary_nonce(), + address::testing::established_address_3(), + ); + let with_signers = MultiSignedEthEvent { + event: event.clone(), + signers: signers.clone(), + }; + + let update: EthMsgUpdate = with_signers.into(); + + assert_eq!( + update, + EthMsgUpdate { + body: event, + seen_by: dedupe(signers), + } + ); + } +} diff --git a/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs b/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs new file mode 100644 index 0000000000..9b2f9a460b --- /dev/null +++ b/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs @@ -0,0 +1,195 @@ +//! Logic for acting on events + +use std::collections::BTreeSet; + +use eyre::Result; +use namada_core::ledger::eth_bridge::storage::wrapped_erc20s; +use namada_core::ledger::storage::traits::StorageHasher; +use namada_core::ledger::storage::{DBIter, Storage, DB}; +use namada_core::types::ethereum_events::{EthereumEvent, TransferToNamada}; +use namada_core::types::storage::Key; + +use crate::protocol::transactions::update; + +/// Updates storage based on the given confirmed `event`. For example, for a +/// confirmed [`EthereumEvent::TransfersToNamada`], mint the corresponding +/// transferred assets to the appropriate receiver addresses. +pub(super) fn act_on( + storage: &mut Storage, + event: &EthereumEvent, +) -> Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + match &event { + EthereumEvent::TransfersToNamada { transfers, .. 
} => { + act_on_transfers_to_namada(storage, transfers) + } + _ => { + tracing::debug!(?event, "No actions taken for Ethereum event"); + Ok(BTreeSet::default()) + } + } +} + +fn act_on_transfers_to_namada( + storage: &mut Storage, + transfers: &[TransferToNamada], +) -> Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let mut changed_keys = BTreeSet::default(); + for TransferToNamada { + amount, + asset, + receiver, + } in transfers + { + let keys: wrapped_erc20s::Keys = asset.into(); + let balance_key = keys.balance(receiver); + update::amount(storage, &balance_key, |balance| { + tracing::debug!( + %balance_key, + ?balance, + "Existing value found", + ); + balance.receive(amount); + tracing::debug!( + %balance_key, + ?balance, + "New value calculated", + ); + })?; + _ = changed_keys.insert(balance_key); + + let supply_key = keys.supply(); + update::amount(storage, &supply_key, |supply| { + tracing::debug!( + %supply_key, + ?supply, + "Existing value found", + ); + supply.receive(amount); + tracing::debug!( + %supply_key, + ?supply, + "New value calculated", + ); + })?; + _ = changed_keys.insert(supply_key); + } + Ok(changed_keys) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use assert_matches::assert_matches; + use borsh::BorshSerialize; + use namada_core::ledger::storage::testing::TestStorage; + use namada_core::types::address; + use namada_core::types::ethereum_events::testing::{ + arbitrary_eth_address, arbitrary_keccak_hash, arbitrary_nonce, + DAI_ERC20_ETH_ADDRESS, + }; + use namada_core::types::token::Amount; + + use super::*; + + #[test] + /// Test that we do not make any changes to storage when acting on most + /// events + fn test_act_on_does_nothing_for_other_events() { + let mut storage = TestStorage::default(); + let events = vec![ + EthereumEvent::NewContract { + name: "bridge".to_string(), + address: arbitrary_eth_address(), + }, + EthereumEvent::TransfersToEthereum { + nonce: arbitrary_nonce(), + transfers: vec![], + }, + EthereumEvent::UpdateBridgeWhitelist { + nonce: arbitrary_nonce(), + whitelist: vec![], + }, + EthereumEvent::UpgradedContract { + name: "bridge".to_string(), + address: arbitrary_eth_address(), + }, + EthereumEvent::ValidatorSetUpdate { + nonce: arbitrary_nonce(), + bridge_validator_hash: arbitrary_keccak_hash(), + governance_validator_hash: arbitrary_keccak_hash(), + }, + ]; + + for event in events.iter() { + act_on(&mut storage, event).unwrap(); + let root = Key::from_str("").unwrap(); + assert_eq!( + storage.iter_prefix(&root).0.count(), + 0, + "storage changed unexpectedly while acting on event: {:#?}", + event + ); + } + } + + #[test] + /// Test that storage is indeed changed when we act on a non-empty + /// TransfersToNamada batch + fn test_act_on_changes_storage_for_transfers_to_namada() { + let mut storage = TestStorage::default(); + let amount = Amount::from(100); + let receiver = address::testing::established_address_1(); + let transfers = vec![TransferToNamada { + amount, + asset: DAI_ERC20_ETH_ADDRESS, + receiver, + }]; + let event = EthereumEvent::TransfersToNamada { + nonce: arbitrary_nonce(), + transfers, + }; + + act_on(&mut storage, &event).unwrap(); + + let root = Key::from_str("").unwrap(); + assert_eq!(storage.iter_prefix(&root).0.count(), 2); + } + + #[test] + /// Test acting on a single transfer and minting the first ever wDAI + fn test_act_on_transfers_to_namada_mints_wdai() { + let mut storage = TestStorage::default(); + + let amount = Amount::from(100); + 
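+        // A single transfer of 100 DAI to one receiver; acting on it should
+        // mint the first ever wDAI, crediting both the receiver's balance
+        // and the total wDAI supply.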
let receiver = address::testing::established_address_1(); + let transfers = vec![TransferToNamada { + amount, + asset: DAI_ERC20_ETH_ADDRESS, + receiver: receiver.clone(), + }]; + + act_on_transfers_to_namada(&mut storage, &transfers).unwrap(); + + let wdai: wrapped_erc20s::Keys = (&DAI_ERC20_ETH_ADDRESS).into(); + let receiver_balance_key = wdai.balance(&receiver); + let wdai_supply_key = wdai.supply(); + + let root = Key::from_str("").unwrap(); + assert_eq!(storage.iter_prefix(&root).0.count(), 2); + + let expected_amount = amount.try_to_vec().unwrap(); + for key in vec![receiver_balance_key, wdai_supply_key] { + let (value, _) = storage.read(&key).unwrap(); + assert_matches!(value, Some(bytes) if bytes == expected_amount); + } + } +} diff --git a/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs b/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs new file mode 100644 index 0000000000..706dfdbd8c --- /dev/null +++ b/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs @@ -0,0 +1,461 @@ +//! Code for handling Ethereum events protocol txs. + +mod eth_msgs; +mod events; + +use std::collections::{BTreeSet, HashMap, HashSet}; + +use eth_msgs::EthMsgUpdate; +use eyre::Result; +use namada_core::ledger::storage::traits::StorageHasher; +use namada_core::ledger::storage::{DBIter, Storage, DB}; +use namada_core::types::address::Address; +use namada_core::types::storage::BlockHeight; +use namada_core::types::transaction::TxResult; +use namada_core::types::vote_extensions::ethereum_events::MultiSignedEthEvent; +use namada_core::types::voting_power::FractionalVotingPower; + +use super::ChangedKeys; +use crate::protocol::transactions::utils; +use crate::protocol::transactions::votes::update::NewVotes; +use crate::protocol::transactions::votes::{self, calculate_new}; +use crate::storage::vote_tallies; + +impl utils::GetVoters for HashSet { + #[inline] + fn get_voters(&self) -> HashSet<(Address, BlockHeight)> { + self.iter().fold(HashSet::new(), |mut voters, update| { + voters.extend(update.seen_by.clone().into_iter()); + voters + }) + } +} + +/// Applies derived state changes to storage, based on Ethereum `events` which +/// were newly seen by some active validator(s) in the last epoch. For `events` +/// which have been seen by enough voting power, extra state changes may take +/// place, such as minting of wrapped ERC20s. +/// +/// This function is deterministic based on some existing blockchain state and +/// the passed `events`. +pub fn apply_derived_tx( + storage: &mut Storage, + events: Vec, +) -> Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + if events.is_empty() { + return Ok(TxResult::default()); + } + tracing::info!( + ethereum_events = events.len(), + "Applying state updates derived from Ethereum events found in \ + protocol transaction" + ); + + let updates = events.into_iter().map(Into::::into).collect(); + + let voting_powers = utils::get_voting_powers(storage, &updates)?; + + let changed_keys = apply_updates(storage, updates, voting_powers)?; + + Ok(TxResult { + changed_keys, + ..Default::default() + }) +} + +/// Apply votes to Ethereum events in storage and act on any events which are +/// confirmed. +/// +/// The `voting_powers` map must contain a voting power for all +/// `(Address, BlockHeight)`s that occur in any of the `updates`. 
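+///
+/// A minimal sketch of the expected `voting_powers` shape (the address and
+/// height are illustrative), mirroring how the tests below construct it:
+///
+/// ```ignore
+/// let voting_powers = HashMap::from([(
+///     (validator_address, BlockHeight(100)),
+///     FractionalVotingPower::new(1, 1).unwrap(),
+/// )]);
+/// ```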
+pub(super) fn apply_updates( + storage: &mut Storage, + updates: HashSet, + voting_powers: HashMap<(Address, BlockHeight), FractionalVotingPower>, +) -> Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + tracing::debug!( + updates.len = updates.len(), + ?voting_powers, + "Applying Ethereum state update transaction" + ); + + let mut changed_keys = BTreeSet::default(); + let mut confirmed = vec![]; + for update in updates { + // The order in which updates are applied to storage does not matter. + // The final storage state will be the same regardless. + let (mut changed, newly_confirmed) = + apply_update(storage, update.clone(), &voting_powers)?; + changed_keys.append(&mut changed); + if newly_confirmed { + confirmed.push(update.body); + } + } + if confirmed.is_empty() { + tracing::debug!("No events were newly confirmed"); + return Ok(changed_keys); + } + tracing::debug!(n = confirmed.len(), "Events were newly confirmed",); + + // Right now, the order in which events are acted on does not matter. + // For `TransfersToNamada` events, they can happen in any order. + for event in &confirmed { + let mut changed = events::act_on(storage, event)?; + changed_keys.append(&mut changed); + } + Ok(changed_keys) +} + +/// Apply an [`EthMsgUpdate`] to storage. Returns any keys changed and whether +/// the event was newly seen. +/// +/// The `voting_powers` map must contain a voting power for all +/// `(Address, BlockHeight)`s that occur in `update`. +fn apply_update( + storage: &mut Storage, + update: EthMsgUpdate, + voting_powers: &HashMap<(Address, BlockHeight), FractionalVotingPower>, +) -> Result<(ChangedKeys, bool)> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let eth_msg_keys = vote_tallies::Keys::from(&update.body); + + // we arbitrarily look at whether the seen key is present to + // determine if the /eth_msg already exists in storage, but maybe there + // is a less arbitrary way to do this + let (exists_in_storage, _) = storage.has_key(ð_msg_keys.seen())?; + + let (vote_tracking, changed, confirmed) = if !exists_in_storage { + tracing::debug!(%eth_msg_keys.prefix, "Ethereum event not seen before by any validator"); + let vote_tracking = calculate_new(update.seen_by, voting_powers)?; + let changed = eth_msg_keys.into_iter().collect(); + let confirmed = vote_tracking.seen; + (vote_tracking, changed, confirmed) + } else { + tracing::debug!( + %eth_msg_keys.prefix, + "Ethereum event already exists in storage", + ); + let new_votes = NewVotes::new(update.seen_by.clone(), voting_powers)?; + let (vote_tracking, changed) = + votes::update::calculate(storage, ð_msg_keys, new_votes)?; + if changed.is_empty() { + return Ok((changed, false)); + } + let confirmed = + vote_tracking.seen && changed.contains(ð_msg_keys.seen()); + (vote_tracking, changed, confirmed) + }; + + votes::storage::write( + storage, + ð_msg_keys, + &update.body, + &vote_tracking, + )?; + + Ok((changed, confirmed)) +} + +#[cfg(test)] +mod tests { + use std::collections::{BTreeSet, HashMap, HashSet}; + + use borsh::BorshDeserialize; + use namada_core::ledger::eth_bridge::storage::wrapped_erc20s; + use namada_core::ledger::storage::mockdb::MockDB; + use namada_core::ledger::storage::testing::TestStorage; + use namada_core::ledger::storage::traits::Sha256Hasher; + use namada_core::types::address; + use namada_core::types::ethereum_events::testing::{ + arbitrary_amount, arbitrary_eth_address, arbitrary_nonce, + 
arbitrary_single_transfer, DAI_ERC20_ETH_ADDRESS, + }; + use namada_core::types::ethereum_events::{ + EthereumEvent, TransferToNamada, + }; + use namada_core::types::token::Amount; + use namada_proof_of_stake::epoched::Epoched; + use namada_proof_of_stake::storage::ValidatorSet; + use namada_proof_of_stake::types::WeightedValidator; + use namada_proof_of_stake::PosBase; + + use super::*; + use crate::protocol::transactions::utils::GetVoters; + use crate::protocol::transactions::votes::Votes; + + #[test] + /// Test applying a `TransfersToNamada` batch containing a single transfer + fn test_apply_single_transfer() -> Result<()> { + let sole_validator = address::testing::gen_established_address(); + let receiver = address::testing::established_address_2(); + + let amount = arbitrary_amount(); + let asset = arbitrary_eth_address(); + let body = EthereumEvent::TransfersToNamada { + nonce: arbitrary_nonce(), + transfers: vec![TransferToNamada { + amount, + asset, + receiver: receiver.clone(), + }], + }; + let update = EthMsgUpdate { + body: body.clone(), + seen_by: Votes::from([(sole_validator.clone(), BlockHeight(100))]), + }; + let updates = HashSet::from_iter(vec![update]); + let voting_powers = HashMap::from_iter(vec![( + (sole_validator.clone(), BlockHeight(100)), + FractionalVotingPower::new(1, 1).unwrap(), + )]); + let mut storage = TestStorage::default(); + + let changed_keys = apply_updates(&mut storage, updates, voting_powers)?; + + let eth_msg_keys: vote_tallies::Keys = (&body).into(); + let wrapped_erc20_keys: wrapped_erc20s::Keys = (&asset).into(); + assert_eq!( + BTreeSet::from_iter(vec![ + eth_msg_keys.body(), + eth_msg_keys.seen(), + eth_msg_keys.seen_by(), + eth_msg_keys.voting_power(), + wrapped_erc20_keys.balance(&receiver), + wrapped_erc20_keys.supply(), + ]), + changed_keys + ); + + let (body_bytes, _) = storage.read(ð_msg_keys.body())?; + let body_bytes = body_bytes.unwrap(); + assert_eq!(EthereumEvent::try_from_slice(&body_bytes)?, body); + + let (seen_bytes, _) = storage.read(ð_msg_keys.seen())?; + let seen_bytes = seen_bytes.unwrap(); + assert!(bool::try_from_slice(&seen_bytes)?); + + let (seen_by_bytes, _) = storage.read(ð_msg_keys.seen_by())?; + let seen_by_bytes = seen_by_bytes.unwrap(); + assert_eq!( + Votes::try_from_slice(&seen_by_bytes)?, + Votes::from([(sole_validator, BlockHeight(100))]) + ); + + let (voting_power_bytes, _) = + storage.read(ð_msg_keys.voting_power())?; + let voting_power_bytes = voting_power_bytes.unwrap(); + assert_eq!(<(u64, u64)>::try_from_slice(&voting_power_bytes)?, (1, 1)); + + let (wrapped_erc20_balance_bytes, _) = + storage.read(&wrapped_erc20_keys.balance(&receiver))?; + let wrapped_erc20_balance_bytes = wrapped_erc20_balance_bytes.unwrap(); + assert_eq!( + Amount::try_from_slice(&wrapped_erc20_balance_bytes)?, + amount + ); + + let (wrapped_erc20_supply_bytes, _) = + storage.read(&wrapped_erc20_keys.supply())?; + let wrapped_erc20_supply_bytes = wrapped_erc20_supply_bytes.unwrap(); + assert_eq!( + Amount::try_from_slice(&wrapped_erc20_supply_bytes)?, + amount + ); + + Ok(()) + } + + /// Set up a `TestStorage` initialized at genesis with validators of equal + /// power + fn set_up_test_storage( + active_validators: HashSet
, + ) -> Storage { + let mut storage = TestStorage::default(); + let validator_set = ValidatorSet { + active: active_validators + .into_iter() + .map(|address| WeightedValidator { + bonded_stake: 100_u64, + address, + }) + .collect(), + inactive: BTreeSet::default(), + }; + let validator_sets = Epoched::init_at_genesis(validator_set, 1); + storage.write_validator_set(&validator_sets); + storage + } + + #[test] + /// Test applying a single transfer via `apply_derived_tx`, where an event + /// has enough voting power behind it for it to be applied at the same time + /// that it is recorded in storage + fn test_apply_derived_tx_new_event_mint_immediately() { + let sole_validator = address::testing::established_address_2(); + let mut storage = set_up_test_storage(HashSet::from_iter(vec![ + sole_validator.clone(), + ])); + let receiver = address::testing::established_address_1(); + + let event = EthereumEvent::TransfersToNamada { + nonce: 1.into(), + transfers: vec![TransferToNamada { + amount: Amount::from(100), + asset: DAI_ERC20_ETH_ADDRESS, + receiver: receiver.clone(), + }], + }; + + let result = apply_derived_tx( + &mut storage, + vec![MultiSignedEthEvent { + event: event.clone(), + signers: BTreeSet::from([(sole_validator, BlockHeight(100))]), + }], + ); + + let tx_result = match result { + Ok(tx_result) => tx_result, + Err(err) => panic!("unexpected error: {:#?}", err), + }; + + assert_eq!( + tx_result.gas_used, 0, + "No gas should be used for a derived transaction" + ); + let eth_msg_keys = vote_tallies::Keys::from(&event); + let dai_keys = wrapped_erc20s::Keys::from(&DAI_ERC20_ETH_ADDRESS); + assert_eq!( + tx_result.changed_keys, + BTreeSet::from_iter(vec![ + eth_msg_keys.body(), + eth_msg_keys.seen(), + eth_msg_keys.seen_by(), + eth_msg_keys.voting_power(), + dai_keys.balance(&receiver), + dai_keys.supply(), + ]) + ); + assert!(tx_result.vps_result.accepted_vps.is_empty()); + assert!(tx_result.vps_result.rejected_vps.is_empty()); + assert!(tx_result.vps_result.errors.is_empty()); + assert!(tx_result.initialized_accounts.is_empty()); + assert!(tx_result.ibc_event.is_none()); + } + + /// Test calling apply_derived_tx for an event that isn't backed by enough + /// voting power to be acted on immediately + #[test] + fn test_apply_derived_tx_new_event_dont_mint() { + let validator_a = address::testing::established_address_2(); + let validator_b = address::testing::established_address_3(); + let mut storage = set_up_test_storage(HashSet::from_iter(vec![ + validator_a.clone(), + validator_b, + ])); + let receiver = address::testing::established_address_1(); + + let event = EthereumEvent::TransfersToNamada { + nonce: 1.into(), + transfers: vec![TransferToNamada { + amount: Amount::from(100), + asset: DAI_ERC20_ETH_ADDRESS, + receiver, + }], + }; + + let result = apply_derived_tx( + &mut storage, + vec![MultiSignedEthEvent { + event: event.clone(), + signers: BTreeSet::from([(validator_a, BlockHeight(100))]), + }], + ); + let tx_result = match result { + Ok(tx_result) => tx_result, + Err(err) => panic!("unexpected error: {:#?}", err), + }; + + let eth_msg_keys = vote_tallies::Keys::from(&event); + assert_eq!( + tx_result.changed_keys, + BTreeSet::from_iter(vec![ + eth_msg_keys.body(), + eth_msg_keys.seen(), + eth_msg_keys.seen_by(), + eth_msg_keys.voting_power(), + ]), + "The Ethereum event should have been recorded, but no minting \ + should have happened yet as it has only been seen by 1/2 the \ + voting power so far" + ); + } + + #[test] + /// Assert we don't return anything if we try to 
get the votes for an empty + /// set of updates + pub fn test_get_votes_for_updates_empty() { + let updates = HashSet::new(); + assert!(updates.get_voters().is_empty()); + } + + #[test] + /// Test that we correctly get the votes from a set of updates + pub fn test_get_votes_for_events() { + let updates = HashSet::from([ + EthMsgUpdate { + body: arbitrary_single_transfer( + 1.into(), + address::testing::established_address_1(), + ), + seen_by: Votes::from([ + ( + address::testing::established_address_1(), + BlockHeight(100), + ), + ( + address::testing::established_address_2(), + BlockHeight(102), + ), + ]), + }, + EthMsgUpdate { + body: arbitrary_single_transfer( + 2.into(), + address::testing::established_address_2(), + ), + seen_by: Votes::from([ + ( + address::testing::established_address_1(), + BlockHeight(101), + ), + ( + address::testing::established_address_3(), + BlockHeight(100), + ), + ]), + }, + ]); + let voters = updates.get_voters(); + assert_eq!( + voters, + HashSet::from([ + (address::testing::established_address_1(), BlockHeight(100)), + (address::testing::established_address_1(), BlockHeight(101)), + (address::testing::established_address_2(), BlockHeight(102)), + (address::testing::established_address_3(), BlockHeight(100)) + ]) + ) + } +} diff --git a/ethereum_bridge/src/protocol/transactions/mod.rs b/ethereum_bridge/src/protocol/transactions/mod.rs new file mode 100644 index 0000000000..9c7f684b71 --- /dev/null +++ b/ethereum_bridge/src/protocol/transactions/mod.rs @@ -0,0 +1,19 @@ +//! This module contains functionality for handling protocol transactions. +//! +//! When a protocol transaction is included in a block, we may expect all nodes +//! to update their blockchain state in a deterministic way. This can be done +//! natively rather than via the wasm environment as happens with regular +//! transactions. +pub mod ethereum_events; +mod read; +mod update; +mod utils; +pub mod validator_set_update; +mod votes; + +use std::collections::BTreeSet; + +use namada_core::types::storage; + +/// The keys changed while applying a protocol transaction. +pub type ChangedKeys = BTreeSet; diff --git a/ethereum_bridge/src/protocol/transactions/read.rs b/ethereum_bridge/src/protocol/transactions/read.rs new file mode 100644 index 0000000000..1562045873 --- /dev/null +++ b/ethereum_bridge/src/protocol/transactions/read.rs @@ -0,0 +1,100 @@ +//! Helpers for reading from storage +use borsh::BorshDeserialize; +use eyre::{eyre, Result}; +use namada_core::ledger::storage::traits::StorageHasher; +use namada_core::ledger::storage::{DBIter, Storage, DB}; +use namada_core::types::storage; +use namada_core::types::token::Amount; + +/// Returns the stored Amount, or 0 if not stored +pub(super) fn amount_or_default( + storage: &Storage, + key: &storage::Key, +) -> Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + Ok(maybe_value(storage, key)?.unwrap_or_default()) +} + +/// Read some arbitrary value from storage, erroring if it's not found +pub(super) fn value( + storage: &Storage, + key: &storage::Key, +) -> Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + maybe_value(storage, key)?.ok_or_else(|| eyre!("no value found at {}", key)) +} + +/// Try to read some arbitrary value from storage, returning `None` if nothing +/// is read. This will still error if there is data stored at `key` but it is +/// not deserializable to `T`. 
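+///
+/// A usage sketch (the key and the stored type are illustrative):
+///
+/// ```ignore
+/// let key = storage::Key::parse("some arbitrary key").unwrap();
+/// let stored: Option<Amount> = maybe_value(&storage, &key)?;
+/// ```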
+pub(super) fn maybe_value<T: BorshDeserialize, D, H>(
+    storage: &Storage<D, H>,
+    key: &storage::Key,
+) -> Result<Option<T>>
+where
+    D: 'static + DB + for<'iter> DBIter<'iter> + Sync,
+    H: 'static + StorageHasher + Sync,
+{
+    let (maybe_val, _) = storage.read(key)?;
+    let bytes = match maybe_val {
+        Some(bytes) => bytes,
+        None => return Ok(None),
+    };
+    let deserialized = T::try_from_slice(&bytes[..])?;
+    Ok(Some(deserialized))
+}
+
+#[cfg(test)]
+mod tests {
+    use assert_matches::assert_matches;
+    use borsh::BorshSerialize;
+    use namada_core::ledger::storage::testing::TestStorage;
+    use namada_core::types::storage;
+    use namada_core::types::token::Amount;
+
+    use crate::protocol::transactions::read;
+
+    #[test]
+    fn test_amount_returns_zero_for_uninitialized_storage() {
+        let fake_storage = TestStorage::default();
+        let a = read::amount_or_default(
+            &fake_storage,
+            &storage::Key::parse("some arbitrary key with no stored value")
+                .unwrap(),
+        )
+        .unwrap();
+        assert_eq!(a, Amount::from(0));
+    }
+
+    #[test]
+    fn test_amount_returns_stored_amount() {
+        let key = storage::Key::parse("some arbitrary key").unwrap();
+        let amount = Amount::from(1_000_000);
+        let mut fake_storage = TestStorage::default();
+        fake_storage
+            .write(&key, amount.try_to_vec().unwrap())
+            .unwrap();
+
+        let a = read::amount_or_default(&fake_storage, &key).unwrap();
+        assert_eq!(a, amount);
+    }
+
+    #[test]
+    fn test_amount_errors_if_not_amount() {
+        let key = storage::Key::parse("some arbitrary key").unwrap();
+        let amount = "not an Amount type";
+        let mut fake_storage = TestStorage::default();
+        fake_storage.write(&key, amount.as_bytes()).unwrap();
+
+        assert_matches!(read::amount_or_default(&fake_storage, &key), Err(_));
+    }
+}
diff --git a/ethereum_bridge/src/protocol/transactions/update.rs b/ethereum_bridge/src/protocol/transactions/update.rs
new file mode 100644
index 0000000000..ec3cd686ca
--- /dev/null
+++ b/ethereum_bridge/src/protocol/transactions/update.rs
@@ -0,0 +1,70 @@
+//!
Helpers for writing to storage +use borsh::{BorshDeserialize, BorshSerialize}; +use eyre::Result; +use namada_core::ledger::storage::{DBIter, Storage, StorageHasher, DB}; +use namada_core::types::storage; +use namada_core::types::token::Amount; + +/// Reads the `Amount` from key, applies update then writes it back +pub fn amount( + store: &mut Storage, + key: &storage::Key, + update: impl FnOnce(&mut Amount), +) -> Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let mut amount = super::read::amount_or_default(store, key)?; + update(&mut amount); + store.write(key, amount.try_to_vec()?)?; + Ok(amount) +} + +#[allow(dead_code)] +/// Reads an arbitrary value, applies update then writes it back +pub fn value( + store: &mut Storage, + key: &storage::Key, + update: impl FnOnce(&mut T), +) -> Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let mut value = super::read::value(store, key)?; + update(&mut value); + store.write(key, value.try_to_vec()?)?; + Ok(value) +} + +#[cfg(test)] +mod tests { + use borsh::{BorshDeserialize, BorshSerialize}; + use eyre::{eyre, Result}; + use namada_core::ledger::storage::testing::TestStorage; + use namada_core::types::storage; + + #[test] + /// Test updating a value + fn test_value() -> Result<()> { + let key = storage::Key::parse("some arbitrary key") + .expect("could not set up test"); + let value = 21; + let mut storage = TestStorage::default(); + let serialized = value.try_to_vec().expect("could not set up test"); + storage + .write(&key, serialized) + .expect("could not set up test"); + + super::value(&mut storage, &key, |v: &mut i32| *v *= 2)?; + + let (new_val, _) = storage.read(&key)?; + let new_val = match new_val { + Some(new_val) => ::try_from_slice(&new_val)?, + None => return Err(eyre!("no value found")), + }; + assert_eq!(new_val, 42); + Ok(()) + } +} diff --git a/ethereum_bridge/src/protocol/transactions/utils.rs b/ethereum_bridge/src/protocol/transactions/utils.rs new file mode 100644 index 0000000000..a4b004304e --- /dev/null +++ b/ethereum_bridge/src/protocol/transactions/utils.rs @@ -0,0 +1,315 @@ +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; + +use eyre::eyre; +use itertools::Itertools; +use namada_core::ledger::storage::{DBIter, Storage, StorageHasher, DB}; +use namada_core::types::address::Address; +use namada_core::types::storage::BlockHeight; +use namada_core::types::token; +use namada_core::types::voting_power::FractionalVotingPower; +use namada_proof_of_stake::pos_queries::PosQueries; +use namada_proof_of_stake::types::WeightedValidator; + +/// Proof of some arbitrary tally whose voters can be queried. +pub(super) trait GetVoters { + /// Extract all the voters and the block heights at which they voted from + /// the given proof. + fn get_voters(&self) -> HashSet<(Address, BlockHeight)>; +} + +/// Returns a map whose keys are addresses of validators and the block height at +/// which they signed some arbitrary object, and whose values are the voting +/// powers of these validators at the key's given block height. 
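+///
+/// A rough sketch of the returned map's shape (the address and stake
+/// figures are illustrative): a validator with 100 bonded stake out of a
+/// total of 300 at height 100 maps to a fractional voting power of 100/300:
+///
+/// ```ignore
+/// ((validator_address, BlockHeight(100)),
+///  FractionalVotingPower::new(100, 300).unwrap())
+/// ```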
+pub(super) fn get_voting_powers( + storage: &Storage, + proof: &P, +) -> eyre::Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + P: GetVoters + ?Sized, +{ + let voters = proof.get_voters(); + tracing::debug!(?voters, "Got validators who voted on at least one event"); + + let active_validators = get_active_validators( + storage, + voters.iter().map(|(_, h)| h.to_owned()).collect(), + ); + tracing::debug!( + n = active_validators.len(), + ?active_validators, + "Got active validators" + ); + + let voting_powers = + get_voting_powers_for_selected(&active_validators, voters)?; + tracing::debug!( + ?voting_powers, + "Got voting powers for relevant validators" + ); + + Ok(voting_powers) +} + +pub(super) fn get_active_validators( + storage: &Storage, + block_heights: HashSet, +) -> BTreeMap> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let mut active_validators = BTreeMap::default(); + for height in block_heights.into_iter() { + let epoch = storage.get_epoch(height).expect( + "The epoch of the last block height should always be known", + ); + _ = active_validators + .insert(height, storage.get_active_validators(Some(epoch))); + } + active_validators +} + +/// Gets the voting power of `selected` from `all_active`. Errors if a +/// `selected` validator is not found in `all_active`. +pub(super) fn get_voting_powers_for_selected( + all_active: &BTreeMap>, + selected: HashSet<(Address, BlockHeight)>, +) -> eyre::Result> { + let total_voting_powers = sum_voting_powers_for_block_heights(all_active); + let voting_powers = selected + .into_iter() + .map( + |(addr, height)| -> eyre::Result<( + (Address, BlockHeight), + FractionalVotingPower, + )> { + let active_validators = + all_active.get(&height).ok_or_else(|| { + eyre!("No active validators found for height {height}") + })?; + let individual_voting_power = active_validators + .iter() + .find(|&v| v.address == addr) + .ok_or_else(|| { + eyre!( + "No active validator found with address {addr} \ + for height {height}" + ) + })? + .bonded_stake; + let total_voting_power = total_voting_powers + .get(&height) + .ok_or_else(|| { + eyre!( + "No total voting power provided for height \ + {height}" + ) + })? 
+ .to_owned(); + Ok(( + (addr, height), + FractionalVotingPower::new( + individual_voting_power, + total_voting_power.into(), + )?, + )) + }, + ) + .try_collect()?; + Ok(voting_powers) +} + +pub(super) fn sum_voting_powers_for_block_heights( + validators: &BTreeMap>, +) -> BTreeMap { + validators + .iter() + .map(|(h, vs)| (h.to_owned(), sum_voting_powers(vs))) + .collect() +} + +pub(super) fn sum_voting_powers( + validators: &BTreeSet, +) -> token::Amount { + validators + .iter() + .map(|validator| validator.bonded_stake) + .sum::() + .into() +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + + use assert_matches::assert_matches; + use namada_core::types::address; + use namada_core::types::ethereum_events::testing::arbitrary_bonded_stake; + + use super::*; + + #[test] + /// Test getting the voting power for the sole active validator from the set + /// of active validators + fn test_get_voting_powers_for_selected_sole_validator() { + let sole_validator = address::testing::established_address_1(); + let bonded_stake = arbitrary_bonded_stake(); + let weighted_sole_validator = WeightedValidator { + bonded_stake: bonded_stake.into(), + address: sole_validator.clone(), + }; + let validators = HashSet::from_iter(vec![( + sole_validator.clone(), + BlockHeight(100), + )]); + let active_validators = BTreeMap::from_iter(vec![( + BlockHeight(100), + BTreeSet::from_iter(vec![weighted_sole_validator]), + )]); + + let result = + get_voting_powers_for_selected(&active_validators, validators); + + let voting_powers = match result { + Ok(voting_powers) => voting_powers, + Err(error) => panic!("error: {:?}", error), + }; + assert_eq!(voting_powers.len(), 1); + assert_matches!( + voting_powers.get(&(sole_validator, BlockHeight(100))), + Some(v) if *v == FractionalVotingPower::new(1, 1).unwrap() + ); + } + + #[test] + /// Test that an error is returned if a validator is not found in the set of + /// active validators + fn test_get_voting_powers_for_selected_missing_validator() { + let present_validator = address::testing::established_address_1(); + let missing_validator = address::testing::established_address_2(); + let bonded_stake = arbitrary_bonded_stake(); + let weighted_present_validator = WeightedValidator { + bonded_stake: bonded_stake.into(), + address: present_validator.clone(), + }; + let validators = HashSet::from_iter(vec![ + (present_validator, BlockHeight(100)), + (missing_validator, BlockHeight(100)), + ]); + let active_validators = BTreeMap::from_iter(vec![( + BlockHeight(100), + BTreeSet::from_iter(vec![weighted_present_validator]), + )]); + + let result = + get_voting_powers_for_selected(&active_validators, validators); + + assert!(result.is_err()); + } + + #[test] + /// Assert we error if we are passed an `(Address, BlockHeight)` but are not + /// given a corrseponding set of validators for the block height + fn test_get_voting_powers_for_selected_no_active_validators_for_height() { + let all_active = BTreeMap::default(); + let selected = HashSet::from_iter(vec![( + address::testing::established_address_1(), + BlockHeight(100), + )]); + + let result = get_voting_powers_for_selected(&all_active, selected); + + assert!(result.is_err()); + } + + #[test] + /// Test getting the voting powers for two active validators from the set of + /// active validators + fn test_get_voting_powers_for_selected_two_validators() { + let validator_1 = address::testing::established_address_1(); + let validator_2 = address::testing::established_address_2(); + let bonded_stake_1 = 
token::Amount::from(100); + let bonded_stake_2 = token::Amount::from(200); + let weighted_validator_1 = WeightedValidator { + bonded_stake: bonded_stake_1.into(), + address: validator_1.clone(), + }; + let weighted_validator_2 = WeightedValidator { + bonded_stake: bonded_stake_2.into(), + address: validator_2.clone(), + }; + let validators = HashSet::from_iter(vec![ + (validator_1.clone(), BlockHeight(100)), + (validator_2.clone(), BlockHeight(100)), + ]); + let active_validators = BTreeMap::from_iter(vec![( + BlockHeight(100), + BTreeSet::from_iter(vec![ + weighted_validator_1, + weighted_validator_2, + ]), + )]); + + let result = + get_voting_powers_for_selected(&active_validators, validators); + + let voting_powers = match result { + Ok(voting_powers) => voting_powers, + Err(error) => panic!("error: {:?}", error), + }; + assert_eq!(voting_powers.len(), 2); + assert_matches!( + voting_powers.get(&(validator_1, BlockHeight(100))), + Some(v) if *v == FractionalVotingPower::new(100, 300).unwrap() + ); + assert_matches!( + voting_powers.get(&(validator_2, BlockHeight(100))), + Some(v) if *v == FractionalVotingPower::new(200, 300).unwrap() + ); + } + + #[test] + /// Test summing the voting powers for a set of validators containing only + /// one validator + fn test_sum_voting_powers_sole_validator() { + let sole_validator = address::testing::established_address_1(); + let bonded_stake = arbitrary_bonded_stake(); + let weighted_sole_validator = WeightedValidator { + bonded_stake: bonded_stake.into(), + address: sole_validator, + }; + let validators = BTreeSet::from_iter(vec![weighted_sole_validator]); + + let total = sum_voting_powers(&validators); + + assert_eq!(total, bonded_stake); + } + + #[test] + /// Test summing the voting powers for a set of validators containing two + /// validators + fn test_sum_voting_powers_two_validators() { + let validator_1 = address::testing::established_address_1(); + let validator_2 = address::testing::established_address_2(); + let bonded_stake_1 = token::Amount::from(100); + let bonded_stake_2 = token::Amount::from(200); + let weighted_validator_1 = WeightedValidator { + bonded_stake: bonded_stake_1.into(), + address: validator_1, + }; + let weighted_validator_2 = WeightedValidator { + bonded_stake: bonded_stake_2.into(), + address: validator_2, + }; + let validators = BTreeSet::from_iter(vec![ + weighted_validator_1, + weighted_validator_2, + ]); + + let total = sum_voting_powers(&validators); + + assert_eq!(total, token::Amount::from(300)); + } +} diff --git a/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs b/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs new file mode 100644 index 0000000000..b92c779101 --- /dev/null +++ b/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs @@ -0,0 +1,143 @@ +//! Code for handling validator set update protocol txs. 
+ +use std::collections::{HashMap, HashSet}; + +use eyre::Result; +use namada_core::ledger::storage::{DBIter, Storage, StorageHasher, DB}; +use namada_core::types::address::Address; +use namada_core::types::storage::BlockHeight; +#[allow(unused_imports)] +use namada_core::types::transaction::protocol::ProtocolTxType; +use namada_core::types::transaction::TxResult; +use namada_core::types::vote_extensions::validator_set_update; +use namada_core::types::voting_power::FractionalVotingPower; +use namada_proof_of_stake::pos_queries::PosQueries; + +use super::ChangedKeys; +use crate::protocol::transactions::utils; +use crate::protocol::transactions::votes::update::NewVotes; +use crate::protocol::transactions::votes::{self, Votes}; +use crate::storage::vote_tallies; + +impl utils::GetVoters for validator_set_update::VextDigest { + #[inline] + fn get_voters(&self) -> HashSet<(Address, BlockHeight)> { + self.signatures.keys().cloned().collect() + } +} + +pub fn aggregate_votes( + storage: &mut Storage, + ext: validator_set_update::VextDigest, +) -> Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + if ext.signatures.is_empty() { + tracing::debug!("Ignoring empty validator set update"); + return Ok(Default::default()); + } + + tracing::info!( + num_votes = ext.signatures.len(), + "Aggregating new votes for validator set update" + ); + + let voting_powers = utils::get_voting_powers(storage, &ext)?; + let changed_keys = apply_update(storage, ext, voting_powers)?; + + Ok(TxResult { + changed_keys, + ..Default::default() + }) +} + +fn apply_update( + storage: &mut Storage, + ext: validator_set_update::VextDigest, + voting_powers: HashMap<(Address, BlockHeight), FractionalVotingPower>, +) -> Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let epoch = { + // all votes we gathered are for the same epoch, so + // we can just fetch the block height from the first + // signature we iterate over, and calculate its cor- + // responding epoch + let height = ext + .signatures + .keys() + .map(|(_, height)| *height) + .by_ref() + .next() + .expect( + "We have at least one signature present in this validator set \ + update vote extension digest", + ); + + storage + .get_epoch(height) + .expect("The epoch of the given block height should be known") + }; + + let valset_upd_keys = vote_tallies::Keys::from(&epoch); + let (exists_in_storage, _) = storage.has_key(&valset_upd_keys.seen())?; + + let mut seen_by = Votes::default(); + for (address, block_height) in ext.signatures.into_keys() { + if let Some(present) = seen_by.insert(address, block_height) { + // TODO(namada#770): this shouldn't be happening in any case and we + // should be refactoring to get rid of `BlockHeight` + tracing::warn!(?present, "Duplicate vote in digest"); + } + } + + let (tally, changed, confirmed) = if !exists_in_storage { + tracing::debug!( + %valset_upd_keys.prefix, + ?ext.voting_powers, + "New validator set update vote aggregation started" + ); + let tally = votes::calculate_new(seen_by, &voting_powers)?; + let changed = valset_upd_keys.into_iter().collect(); + let confirmed = tally.seen; + (tally, changed, confirmed) + } else { + tracing::debug!( + %valset_upd_keys.prefix, + "Validator set update votes already in storage", + ); + let new_votes = NewVotes::new(seen_by, &voting_powers)?; + let (tally, changed) = + votes::update::calculate(storage, &valset_upd_keys, new_votes)?; + if changed.is_empty() { + return Ok(changed); 
+ } + let confirmed = tally.seen && changed.contains(&valset_upd_keys.seen()); + (tally, changed, confirmed) + }; + + tracing::debug!( + ?tally, + ?ext.voting_powers, + "Applying validator set update state changes" + ); + votes::storage::write( + storage, + &valset_upd_keys, + &ext.voting_powers, + &tally, + )?; + + if confirmed { + tracing::debug!( + %valset_upd_keys.prefix, + "Acquired complete proof on validator set update" + ); + } + + Ok(changed) +} diff --git a/ethereum_bridge/src/protocol/transactions/votes.rs b/ethereum_bridge/src/protocol/transactions/votes.rs new file mode 100644 index 0000000000..3ffba10d76 --- /dev/null +++ b/ethereum_bridge/src/protocol/transactions/votes.rs @@ -0,0 +1,173 @@ +//! Logic and data types relating to tallying validators' votes for pieces of +//! data stored in the ledger, where those pieces of data should only be acted +//! on once they have received enough votes +use std::collections::{BTreeMap, BTreeSet, HashMap}; + +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use eyre::{eyre, Result}; +use namada_core::types::address::Address; +use namada_core::types::storage::BlockHeight; +use namada_core::types::voting_power::FractionalVotingPower; + +use super::{read, ChangedKeys}; + +pub(super) mod storage; +pub(super) mod update; + +/// The addresses of validators that voted for something, and the block +/// heights at which they voted. We use a [`BTreeMap`] to enforce that a +/// validator (as uniquely identified by an [`Address`]) may vote at most once, +/// and their vote must be associated with a specific [`BlockHeight`]. Their +/// voting power at that block height is what is used when calculating whether +/// something has enough voting power behind it or not. +pub type Votes = BTreeMap; + +#[derive( + Clone, Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize, BorshSchema, +)] +/// Represents all the information needed to tally a piece of data that may be +/// voted for over multiple epochs +pub struct Tally { + /// The total voting power that's voted for this event across all epochs + pub voting_power: FractionalVotingPower, + /// The votes which have been counted towards `voting_power`. Note that + /// validators may submit multiple votes at different block heights for + /// the same thing, but ultimately only one vote per validator will be + /// used when tallying voting power. + pub seen_by: Votes, + /// Whether this event has been acted on or not - this should only ever + /// transition from `false` to `true`, once there is enough voting power + pub seen: bool, +} + +/// Calculate a new [`Tally`] based on some validators' fractional voting powers +/// as specific block heights +pub fn calculate_new( + seen_by: Votes, + voting_powers: &HashMap<(Address, BlockHeight), FractionalVotingPower>, +) -> Result { + let mut seen_by_voting_power = FractionalVotingPower::default(); + for (validator, block_height) in seen_by.iter() { + match voting_powers + .get(&(validator.to_owned(), block_height.to_owned())) + { + Some(voting_power) => seen_by_voting_power += voting_power, + None => { + return Err(eyre!( + "voting power was not provided for validator {}", + validator + )); + } + }; + } + + let newly_confirmed = + seen_by_voting_power > FractionalVotingPower::TWO_THIRDS; + Ok(Tally { + voting_power: seen_by_voting_power, + seen_by, + seen: newly_confirmed, + }) +} + +/// Deterministically constructs a [`Votes`] map from a set of validator +/// addresses and the block heights they signed something at. 
We arbitrarily +/// take the earliest block height for each validator address encountered. +pub fn dedupe(signers: BTreeSet<(Address, BlockHeight)>) -> Votes { + signers.into_iter().rev().collect() +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeSet; + + use namada_core::types::address; + use namada_core::types::storage::BlockHeight; + + use super::*; + + #[test] + fn test_dedupe_empty() { + let signers = BTreeSet::new(); + + let deduped = dedupe(signers); + + assert_eq!(deduped, Votes::new()); + } + + #[test] + fn test_dedupe_single_vote() { + let sole_validator = address::testing::established_address_1(); + let votes = [(sole_validator, BlockHeight(100))]; + let signers = BTreeSet::from(votes.clone()); + + let deduped = dedupe(signers); + + assert_eq!(deduped, Votes::from(votes)); + } + + #[test] + fn test_dedupe_multiple_votes_same_voter() { + let sole_validator = address::testing::established_address_1(); + let earliest_vote_height = 100; + let earliest_vote = + (sole_validator.clone(), BlockHeight(earliest_vote_height)); + let votes = [ + earliest_vote.clone(), + ( + sole_validator.clone(), + BlockHeight(earliest_vote_height + 1), + ), + (sole_validator, BlockHeight(earliest_vote_height + 100)), + ]; + let signers = BTreeSet::from(votes); + + let deduped = dedupe(signers); + + assert_eq!(deduped, Votes::from([earliest_vote])); + } + + #[test] + fn test_dedupe_multiple_votes_multiple_voters() { + let validator_1 = address::testing::established_address_1(); + let validator_2 = address::testing::established_address_2(); + let validator_1_earliest_vote_height = 100; + let validator_1_earliest_vote = ( + validator_1.clone(), + BlockHeight(validator_1_earliest_vote_height), + ); + let validator_2_earliest_vote_height = 200; + let validator_2_earliest_vote = ( + validator_2.clone(), + BlockHeight(validator_2_earliest_vote_height), + ); + let votes = [ + validator_1_earliest_vote.clone(), + ( + validator_1.clone(), + BlockHeight(validator_1_earliest_vote_height + 1), + ), + ( + validator_1, + BlockHeight(validator_1_earliest_vote_height + 100), + ), + validator_2_earliest_vote.clone(), + ( + validator_2.clone(), + BlockHeight(validator_2_earliest_vote_height + 1), + ), + ( + validator_2, + BlockHeight(validator_2_earliest_vote_height + 100), + ), + ]; + let signers = BTreeSet::from(votes); + + let deduped = dedupe(signers); + + assert_eq!( + deduped, + Votes::from([validator_1_earliest_vote, validator_2_earliest_vote]) + ); + } +} diff --git a/ethereum_bridge/src/protocol/transactions/votes/storage.rs b/ethereum_bridge/src/protocol/transactions/votes/storage.rs new file mode 100644 index 0000000000..918b1efe1e --- /dev/null +++ b/ethereum_bridge/src/protocol/transactions/votes/storage.rs @@ -0,0 +1,129 @@ +use borsh::BorshSerialize; +use eyre::Result; +use namada_core::ledger::storage::{DBIter, Storage, StorageHasher, DB}; +use namada_core::types::voting_power::FractionalVotingPower; + +use super::{Tally, Votes}; +use crate::storage::vote_tallies; + +pub fn write( + storage: &mut Storage, + keys: &vote_tallies::Keys, + body: &T, + tally: &Tally, +) -> Result<()> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + T: BorshSerialize, +{ + storage.write(&keys.body(), &body.try_to_vec()?)?; + storage.write(&keys.seen(), &tally.seen.try_to_vec()?)?; + storage.write(&keys.seen_by(), &tally.seen_by.try_to_vec()?)?; + storage.write(&keys.voting_power(), &tally.voting_power.try_to_vec()?)?; + Ok(()) +} + +#[allow(dead_code)] +pub fn read( + 
storage: &mut Storage, + keys: &vote_tallies::Keys, +) -> Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let seen: bool = super::read::value(storage, &keys.seen())?; + let seen_by: Votes = super::read::value(storage, &keys.seen_by())?; + let voting_power: FractionalVotingPower = + super::read::value(storage, &keys.voting_power())?; + + Ok(Tally { + voting_power, + seen_by, + seen, + }) +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use namada_core::ledger::storage::testing::TestStorage; + use namada_core::types::address; + use namada_core::types::ethereum_events::EthereumEvent; + use namada_core::types::voting_power::FractionalVotingPower; + + use super::*; + + #[test] + fn test_write_tally() { + let mut storage = TestStorage::default(); + let event = EthereumEvent::TransfersToNamada { + nonce: 0.into(), + transfers: vec![], + }; + let keys = vote_tallies::Keys::from(&event); + let tally = Tally { + voting_power: FractionalVotingPower::new(1, 3).unwrap(), + seen_by: BTreeMap::from([( + address::testing::established_address_1(), + 10.into(), + )]), + seen: false, + }; + + let result = write(&mut storage, &keys, &event, &tally); + + assert!(result.is_ok()); + let (body, _) = storage.read(&keys.body()).unwrap(); + assert_eq!(body, Some(event.try_to_vec().unwrap())); + let (seen, _) = storage.read(&keys.seen()).unwrap(); + assert_eq!(seen, Some(tally.seen.try_to_vec().unwrap())); + let (seen_by, _) = storage.read(&keys.seen_by()).unwrap(); + assert_eq!(seen_by, Some(tally.seen_by.try_to_vec().unwrap())); + let (voting_power, _) = storage.read(&keys.voting_power()).unwrap(); + assert_eq!( + voting_power, + Some(tally.voting_power.try_to_vec().unwrap()) + ); + } + + #[test] + fn test_read_tally() { + let mut storage = TestStorage::default(); + let event = EthereumEvent::TransfersToNamada { + nonce: 0.into(), + transfers: vec![], + }; + let keys = vote_tallies::Keys::from(&event); + let tally = Tally { + voting_power: FractionalVotingPower::new(1, 3).unwrap(), + seen_by: BTreeMap::from([( + address::testing::established_address_1(), + 10.into(), + )]), + seen: false, + }; + storage + .write(&keys.body(), &event.try_to_vec().unwrap()) + .unwrap(); + storage + .write(&keys.seen(), &tally.seen.try_to_vec().unwrap()) + .unwrap(); + storage + .write(&keys.seen_by(), &tally.seen_by.try_to_vec().unwrap()) + .unwrap(); + storage + .write( + &keys.voting_power(), + &tally.voting_power.try_to_vec().unwrap(), + ) + .unwrap(); + + let result = read(&mut storage, &keys); + + assert!(result.is_ok()); + assert_eq!(result.unwrap(), tally); + } +} diff --git a/ethereum_bridge/src/protocol/transactions/votes/update.rs b/ethereum_bridge/src/protocol/transactions/votes/update.rs new file mode 100644 index 0000000000..1a58f7faee --- /dev/null +++ b/ethereum_bridge/src/protocol/transactions/votes/update.rs @@ -0,0 +1,550 @@ +use std::collections::{BTreeSet, HashMap, HashSet}; + +use borsh::BorshDeserialize; +use eyre::{eyre, Result}; +use namada_core::ledger::storage::{DBIter, Storage, StorageHasher, DB}; +use namada_core::types::address::Address; +use namada_core::types::storage::BlockHeight; +use namada_core::types::voting_power::FractionalVotingPower; + +use super::{ChangedKeys, Tally, Votes}; +use crate::storage::vote_tallies; + +/// Wraps all the information about new votes to be applied to some existing +/// tally in storage. 
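+///
+/// A construction sketch (all values are illustrative):
+///
+/// ```ignore
+/// let votes = Votes::from([(validator.clone(), BlockHeight(100))]);
+/// let voting_powers = HashMap::from([(
+///     (validator, BlockHeight(100)),
+///     FractionalVotingPower::new(1, 3).unwrap(),
+/// )]);
+/// let new_votes = NewVotes::new(votes, &voting_powers)?;
+/// ```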
+pub(in super::super) struct NewVotes {
+    inner: HashMap<Address, (BlockHeight, FractionalVotingPower)>,
+}
+
+impl NewVotes {
+    /// Constructs a new [`NewVotes`].
+    ///
+    /// For all `votes` provided, a corresponding [`FractionalVotingPower`] must
+    /// be provided in `voting_powers` also, otherwise an error will be
+    /// returned.
+    pub fn new(
+        votes: Votes,
+        voting_powers: &HashMap<(Address, BlockHeight), FractionalVotingPower>,
+    ) -> Result<Self> {
+        let mut inner = HashMap::default();
+        for vote in votes {
+            let fract_voting_power = match voting_powers.get(&vote) {
+                Some(fract_voting_power) => fract_voting_power,
+                None => {
+                    let (address, block_height) = vote;
+                    return Err(eyre!(
+                        "No fractional voting power provided for vote by \
+                         validator {address} at block height {block_height}"
+                    ));
+                }
+            };
+            let (address, block_height) = vote;
+            _ = inner
+                .insert(address, (block_height, fract_voting_power.to_owned()));
+        }
+        Ok(Self { inner })
+    }
+
+    pub fn voters(&self) -> BTreeSet<Address>
{
+        self.inner.keys().cloned().collect()
+    }
+
+    /// Consumes `self` and returns a [`NewVotes`] with any addresses from
+    /// `voters` removed, as well as the set of addresses that were actually
+    /// removed. Useful for removing voters who have already voted for
+    /// something.
+    pub fn without_voters<'a>(
+        self,
+        voters: impl IntoIterator<Item = &'a Address>,
+    ) -> (Self, HashSet<&'a Address>) {
+        let mut inner = self.inner;
+        let mut removed = HashSet::default();
+        for voter in voters {
+            inner.remove(voter);
+            removed.insert(voter);
+        }
+        (Self { inner }, removed)
+    }
+}
+
+impl IntoIterator for NewVotes {
+    type IntoIter = std::collections::hash_set::IntoIter<Self::Item>;
+    type Item = (Address, BlockHeight, FractionalVotingPower);
+
+    fn into_iter(self) -> Self::IntoIter {
+        let items: HashSet<_> = self
+            .inner
+            .into_iter()
+            .map(|(address, (block_height, fract_voting_power))| {
+                (address, block_height, fract_voting_power)
+            })
+            .collect();
+        items.into_iter()
+    }
+}
+
+/// Calculate an updated [`Tally`] based on one that is in storage under
+/// `keys`, with new votes from `vote_info` applied, along with the storage
+/// keys that would change. If the [`Tally`] is already `seen = true` in
+/// storage, then no votes from `vote_info` are applied, and the returned
+/// changed keys will be empty.
+pub(in super::super) fn calculate<D, H, T>(
+    store: &mut Storage<D, H>,
+    keys: &vote_tallies::Keys<T>,
+    vote_info: NewVotes,
+) -> Result<(Tally, ChangedKeys)>
+where
+    D: 'static + DB + for<'iter> DBIter<'iter> + Sync,
+    H: 'static + StorageHasher + Sync,
+    T: BorshDeserialize,
+{
+    tracing::info!(
+        ?keys.prefix,
+        validators = ?vote_info.voters(),
+        "Calculating validators' votes applied to an existing tally"
+    );
+    let tally_pre = super::storage::read(store, keys)?;
+    if tally_pre.seen {
+        return Ok((tally_pre, ChangedKeys::default()));
+    }
+
+    let (vote_info, duplicate_voters) =
+        vote_info.without_voters(tally_pre.seen_by.keys());
+    for voter in duplicate_voters {
+        tracing::info!(
+            ?keys.prefix,
+            ?voter,
+            "Ignoring duplicate voter"
+        );
+    }
+    let tally_post = apply(&tally_pre, vote_info)
+        .expect("We deduplicated voters already, so this should never error");
+
+    let changed_keys = keys_changed(keys, &tally_pre, &tally_post);
+
+    if tally_post.seen {
+        tracing::info!(
+            ?keys.prefix,
+            "Tally has been seen by a quorum of validators",
+        );
+    } else {
+        tracing::debug!(
+            ?keys.prefix,
+            "Tally is not yet seen by a quorum of validators",
+        );
+    };
+
+    tracing::debug!(
+        ?tally_pre,
+        ?tally_post,
+        "Calculated and validated vote tracking updates",
+    );
+    Ok((tally_post, changed_keys))
+}
+
+/// Takes an existing [`Tally`] and calculates the new [`Tally`] based on new
+/// voters from `vote_info`. An error is returned if any validator which
+/// previously voted is present in `vote_info`.
+fn apply(tally: &Tally, vote_info: NewVotes) -> Result<Tally> {
+    let mut voting_power_post = tally.voting_power.clone();
+    let mut seen_by_post = tally.seen_by.clone();
+    for (validator, vote_height, voting_power) in vote_info {
+        if let Some(already_voted_height) =
+            seen_by_post.insert(validator.clone(), vote_height)
+        {
+            return Err(eyre!(
+                "Validator {validator} had already voted at height \
+                 {already_voted_height}",
+            ));
+        };
+        voting_power_post += voting_power;
+    }
+
+    let seen_post = voting_power_post > FractionalVotingPower::TWO_THIRDS;
+
+    Ok(Tally {
+        voting_power: voting_power_post,
+        seen_by: seen_by_post,
+        seen: seen_post,
+    })
+}
+
+/// Straightforwardly calculates the keys that changed between `pre` and
+/// `post`.
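+///
+/// For example (as exercised by the tests below): if only `voting_power`
+/// and `seen_by` differ between `pre` and `post`, the returned set contains
+/// exactly `keys.voting_power()` and `keys.seen_by()`, but not `keys.seen()`.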
+fn keys_changed( + keys: &vote_tallies::Keys, + pre: &Tally, + post: &Tally, +) -> ChangedKeys { + let mut changed_keys = ChangedKeys::default(); + if pre.seen != post.seen { + changed_keys.insert(keys.seen()); + }; + if pre.voting_power != post.voting_power { + changed_keys.insert(keys.voting_power()); + }; + if pre.seen_by != post.seen_by { + changed_keys.insert(keys.seen_by()); + }; + changed_keys +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use namada_core::ledger::storage::testing::TestStorage; + use namada_core::types::address; + use namada_core::types::ethereum_events::EthereumEvent; + + use super::*; + use crate::protocol::transactions::votes; + use crate::protocol::transactions::votes::update::tests::helpers::{ + arbitrary_event, setup_tally, + }; + + mod helpers { + use super::*; + + /// Returns an arbitrary piece of data that can have votes tallied + /// against it. + pub(super) fn arbitrary_event() -> EthereumEvent { + EthereumEvent::TransfersToNamada { + nonce: 0.into(), + transfers: vec![], + } + } + + /// Writes an initial [`Tally`] to storage, based on the passed `votes`. + pub(super) fn setup_tally( + storage: &mut TestStorage, + event: &EthereumEvent, + keys: &vote_tallies::Keys, + votes: HashSet<(Address, BlockHeight, FractionalVotingPower)>, + ) -> Result { + let voting_power: FractionalVotingPower = + votes.iter().cloned().map(|(_, _, v)| v).sum(); + let tally = Tally { + voting_power: voting_power.to_owned(), + seen_by: votes.into_iter().map(|(a, h, _)| (a, h)).collect(), + seen: voting_power > FractionalVotingPower::TWO_THIRDS, + }; + votes::storage::write(storage, keys, event, &tally)?; + Ok(tally) + } + } + + #[test] + fn test_vote_info_new_empty() -> Result<()> { + let voting_powers = HashMap::default(); + + let vote_info = NewVotes::new(Votes::default(), &voting_powers)?; + + assert!(vote_info.voters().is_empty()); + assert_eq!(vote_info.into_iter().count(), 0); + Ok(()) + } + + #[test] + fn test_vote_info_new_single_voter() -> Result<()> { + let validator = address::testing::established_address_1(); + let vote_height = BlockHeight(100); + let voting_power = FractionalVotingPower::new(1, 3)?; + let vote = (validator.clone(), vote_height); + let votes = Votes::from([vote.clone()]); + let voting_powers = HashMap::from([(vote, voting_power.clone())]); + + let vote_info = NewVotes::new(votes, &voting_powers)?; + + assert_eq!(vote_info.voters(), BTreeSet::from([validator.clone()])); + let votes: BTreeSet<_> = vote_info.into_iter().collect(); + assert_eq!( + votes, + BTreeSet::from([(validator, vote_height, voting_power,)]), + ); + Ok(()) + } + + #[test] + fn test_vote_info_new_error() -> Result<()> { + let votes = Votes::from([( + address::testing::established_address_1(), + BlockHeight(100), + )]); + let voting_powers = HashMap::default(); + + let result = NewVotes::new(votes, &voting_powers); + + assert!(result.is_err()); + Ok(()) + } + + #[test] + fn test_vote_info_without_voters() -> Result<()> { + let validator = address::testing::established_address_1(); + let vote_height = BlockHeight(100); + let voting_power = FractionalVotingPower::new(1, 3)?; + let vote = (validator.clone(), vote_height); + let votes = Votes::from([vote.clone()]); + let voting_powers = HashMap::from([(vote, voting_power)]); + let vote_info = NewVotes::new(votes, &voting_powers)?; + + let (vote_info, removed) = vote_info.without_voters(vec![&validator]); + + assert!(vote_info.voters().is_empty()); + assert_eq!(removed, HashSet::from([&validator])); + Ok(()) + } + + 
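+    // Note on the invariant exercised below: `apply` errors if a validator
+    // already present in `seen_by` votes again, rather than double-counting
+    // its voting power; `calculate` filters such duplicates out beforehand
+    // via `NewVotes::without_voters`.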
#[test] + fn test_apply_duplicate_votes() -> Result<()> { + let mut storage = TestStorage::default(); + + let validator = address::testing::established_address_1(); + let already_voted_height = BlockHeight(100); + + let event = arbitrary_event(); + let keys = vote_tallies::Keys::from(&event); + let tally_pre = setup_tally( + &mut storage, + &event, + &keys, + HashSet::from([( + validator.clone(), + already_voted_height, + FractionalVotingPower::new(1, 3)?, + )]), + )?; + + let votes = Votes::from([(validator.clone(), BlockHeight(1000))]); + let voting_powers = HashMap::from([( + (validator, BlockHeight(1000)), + FractionalVotingPower::new(1, 3)?, + )]); + let vote_info = NewVotes::new(votes, &voting_powers)?; + + let result = apply(&tally_pre, vote_info); + + assert!(result.is_err()); + Ok(()) + } + + /// Tests that an unchanged tally is returned if the tally as in storage is + /// already recorded as having been seen. + #[test] + fn test_calculate_already_seen() -> Result<()> { + let mut storage = TestStorage::default(); + let event = arbitrary_event(); + let keys = vote_tallies::Keys::from(&event); + let tally_pre = setup_tally( + &mut storage, + &event, + &keys, + HashSet::from([( + address::testing::established_address_1(), + BlockHeight(10), + FractionalVotingPower::new(3, 4)?, // this is > 2/3 + )]), + )?; + + let validator = address::testing::established_address_2(); + let vote_height = BlockHeight(100); + let voting_power = FractionalVotingPower::new(1, 3)?; + let vote = (validator, vote_height); + let votes = Votes::from([vote.clone()]); + let voting_powers = HashMap::from([(vote, voting_power)]); + let vote_info = NewVotes::new(votes, &voting_powers)?; + + let (tally_post, changed_keys) = + calculate(&mut storage, &keys, vote_info)?; + + assert_eq!(tally_post, tally_pre); + assert!(changed_keys.is_empty()); + Ok(()) + } + + /// Tests that an unchanged tally is returned if no votes are passed. + #[test] + fn test_calculate_empty() -> Result<()> { + let mut storage = TestStorage::default(); + let event = arbitrary_event(); + let keys = vote_tallies::Keys::from(&event); + let tally_pre = setup_tally( + &mut storage, + &event, + &keys, + HashSet::from([( + address::testing::established_address_1(), + BlockHeight(10), + FractionalVotingPower::new(1, 3)?, + )]), + )?; + votes::storage::write(&mut storage, &keys, &event, &tally_pre)?; + let vote_info = NewVotes::new(Votes::default(), &HashMap::default())?; + + let (tally_post, changed_keys) = + calculate(&mut storage, &keys, vote_info)?; + + assert_eq!(tally_post, tally_pre); + assert!(changed_keys.is_empty()); + Ok(()) + } + + /// Tests the case where a single vote is applied, and the tally is still + /// not yet seen. 
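+    /// (Here the existing 1/3 plus the new 1/3 of voting power sums to
+    /// exactly 2/3, which is not strictly greater than the 2/3 `seen`
+    /// threshold.)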
+ #[test] + fn test_calculate_one_vote_not_seen() -> Result<()> { + let mut storage = TestStorage::default(); + + let event = arbitrary_event(); + let keys = vote_tallies::Keys::from(&event); + let tally_pre = setup_tally( + &mut storage, + &event, + &keys, + HashSet::from([( + address::testing::established_address_1(), + BlockHeight(10), + FractionalVotingPower::new(1, 3)?, + )]), + )?; + votes::storage::write(&mut storage, &keys, &event, &tally_pre)?; + + let validator = address::testing::established_address_2(); + let vote_height = BlockHeight(100); + let voting_power = FractionalVotingPower::new(1, 3)?; + let vote = (validator, vote_height); + let votes = Votes::from([vote.clone()]); + let voting_powers = HashMap::from([(vote.clone(), voting_power)]); + let vote_info = NewVotes::new(votes, &voting_powers)?; + + let (tally_post, changed_keys) = + calculate(&mut storage, &keys, vote_info)?; + + assert_eq!( + tally_post, + Tally { + voting_power: FractionalVotingPower::new(2, 3)?, + seen_by: BTreeMap::from([ + (address::testing::established_address_1(), 10.into()), + vote, + ]), + seen: false, + } + ); + assert_eq!( + changed_keys, + BTreeSet::from([keys.voting_power(), keys.seen_by()]) + ); + Ok(()) + } + + /// Tests the case where a single vote is applied, and the tally is now + /// seen. + #[test] + fn test_calculate_one_vote_seen() -> Result<()> { + let mut storage = TestStorage::default(); + + let event = arbitrary_event(); + let keys = vote_tallies::Keys::from(&event); + let tally_pre = setup_tally( + &mut storage, + &event, + &keys, + HashSet::from([( + address::testing::established_address_1(), + BlockHeight(10), + FractionalVotingPower::new(1, 3)?, + )]), + )?; + votes::storage::write(&mut storage, &keys, &event, &tally_pre)?; + + let validator = address::testing::established_address_2(); + let vote_height = BlockHeight(100); + let voting_power = FractionalVotingPower::new(2, 3)?; + let vote = (validator, vote_height); + let votes = Votes::from([vote.clone()]); + let voting_powers = HashMap::from([(vote.clone(), voting_power)]); + let vote_info = NewVotes::new(votes, &voting_powers)?; + + let (tally_post, changed_keys) = + calculate(&mut storage, &keys, vote_info)?; + + assert_eq!( + tally_post, + Tally { + voting_power: FractionalVotingPower::new(1, 1)?, + seen_by: BTreeMap::from([ + (address::testing::established_address_1(), 10.into()), + vote, + ]), + seen: true, + } + ); + assert_eq!( + changed_keys, + BTreeSet::from([keys.voting_power(), keys.seen_by(), keys.seen()]) + ); + Ok(()) + } + + #[test] + fn test_keys_changed_all() -> Result<()> { + let voting_power_a = FractionalVotingPower::new(1, 3)?; + let voting_power_b = FractionalVotingPower::new(2, 3)?; + + let seen_a = false; + let seen_b = true; + + let seen_by_a = BTreeMap::from([( + address::testing::established_address_1(), + BlockHeight(10), + )]); + let seen_by_b = BTreeMap::from([( + address::testing::established_address_2(), + BlockHeight(20), + )]); + + let event = arbitrary_event(); + let keys = vote_tallies::Keys::from(&event); + let pre = Tally { + voting_power: voting_power_a, + seen: seen_a, + seen_by: seen_by_a, + }; + let post = Tally { + voting_power: voting_power_b, + seen: seen_b, + seen_by: seen_by_b, + }; + let changed_keys = keys_changed(&keys, &pre, &post); + + assert_eq!( + changed_keys, + BTreeSet::from([keys.seen(), keys.seen_by(), keys.voting_power()]) + ); + Ok(()) + } + + #[test] + fn test_keys_changed_none() -> Result<()> { + let voting_power = FractionalVotingPower::new(1, 3)?; + let seen = 
false; + let seen_by = BTreeMap::from([( + address::testing::established_address_1(), + BlockHeight(10), + )]); + + let event = arbitrary_event(); + let keys = vote_tallies::Keys::from(&event); + let pre = Tally { + voting_power, + seen, + seen_by, + }; + let post = pre.clone(); + let changed_keys = keys_changed(&keys, &pre, &post); + + assert!(changed_keys.is_empty()); + Ok(()) + } +} diff --git a/ethereum_bridge/src/storage/mod.rs b/ethereum_bridge/src/storage/mod.rs new file mode 100644 index 0000000000..a71644d9af --- /dev/null +++ b/ethereum_bridge/src/storage/mod.rs @@ -0,0 +1,4 @@ +//! Functionality for accessing the storage subspace +pub use namada_core::ledger::eth_bridge::storage::bridge_pool; +pub mod vote_tallies; +pub use namada_core::ledger::eth_bridge::storage::{wrapped_erc20s, *}; diff --git a/ethereum_bridge/src/storage/vote_tallies.rs b/ethereum_bridge/src/storage/vote_tallies.rs new file mode 100644 index 0000000000..57dd75a0ea --- /dev/null +++ b/ethereum_bridge/src/storage/vote_tallies.rs @@ -0,0 +1,236 @@ +//! Functionality for accessing keys to do with tallying votes + +use namada_core::types::ethereum_events::EthereumEvent; +use namada_core::types::hash::Hash; +use namada_core::types::storage::{Epoch, Key}; +use namada_core::types::vote_extensions::validator_set_update::VotingPowersMap; + +/// Storage sub-key space reserved to keeping track of the +/// voting power assigned to Ethereum events. +pub const ETH_MSGS_PREFIX_KEY_SEGMENT: &str = "eth_msgs"; + +/// Storage sub-key space reserved to keeping track of the +/// voting power assigned to validator set updates. +pub const VALSET_UPDS_PREFIX_KEY_SEGMENT: &str = "validator_set_updates"; + +const BODY_KEY_SEGMENT: &str = "body"; +const SEEN_KEY_SEGMENT: &str = "seen"; +const SEEN_BY_KEY_SEGMENT: &str = "seen_by"; +const VOTING_POWER_KEY_SEGMENT: &str = "voting_power"; + +/// Generator for the keys under which details of votes for some piece of data +/// is stored +pub struct Keys { + /// The prefix under which the details of a piece of data for which we are + /// tallying votes is stored + pub prefix: Key, + _phantom: std::marker::PhantomData<*const T>, +} + +impl Keys { + /// Get the `body` key - there should be a Borsh-serialized `T` stored + /// here. + pub fn body(&self) -> Key { + self.prefix + .push(&BODY_KEY_SEGMENT.to_owned()) + .expect("should always be able to construct this key") + } + + /// Get the `seen` key - there should be a [`bool`] stored here. + pub fn seen(&self) -> Key { + self.prefix + .push(&SEEN_KEY_SEGMENT.to_owned()) + .expect("should always be able to construct this key") + } + + /// Get the `seen_by` key - there should be a `BTreeSet
` stored + /// here. + pub fn seen_by(&self) -> Key { + self.prefix + .push(&SEEN_BY_KEY_SEGMENT.to_owned()) + .expect("should always be able to construct this key") + } + + /// Get the `voting_power` key - there should be a `(u64, u64)` stored + /// here. + pub fn voting_power(&self) -> Key { + self.prefix + .push(&VOTING_POWER_KEY_SEGMENT.to_owned()) + .expect("should always be able to construct this key") + } +} + +impl IntoIterator for &Keys { + type IntoIter = std::vec::IntoIter; + type Item = Key; + + fn into_iter(self) -> Self::IntoIter { + vec![ + self.body(), + self.seen(), + self.seen_by(), + self.voting_power(), + ] + .into_iter() + } +} + +/// Get the key prefix corresponding to the storage location of +/// [`EthereumEvent`]s whose "seen" state is being tracked. +pub fn eth_msgs_prefix() -> Key { + super::prefix() + .push(Ð_MSGS_PREFIX_KEY_SEGMENT.to_owned()) + .expect("should always be able to construct this key") +} + +impl From<&EthereumEvent> for Keys { + fn from(event: &EthereumEvent) -> Self { + let hash = event + .hash() + .expect("should always be able to hash Ethereum events"); + (&hash).into() + } +} + +impl From<&Hash> for Keys { + fn from(hash: &Hash) -> Self { + let hex = format!("{}", hash); + let prefix = eth_msgs_prefix() + .push(&hex) + .expect("should always be able to construct this key"); + Keys { + prefix, + _phantom: std::marker::PhantomData, + } + } +} + +/// Get the key prefix corresponding to the storage location of validator set +/// updates whose "seen" state is being tracked. +pub fn valset_upds_prefix() -> Key { + super::prefix() + .push(&VALSET_UPDS_PREFIX_KEY_SEGMENT.to_owned()) + .expect("should always be able to construct this key") +} + +impl From<&Epoch> for Keys { + fn from(epoch: &Epoch) -> Self { + let prefix = valset_upds_prefix() + .push(epoch) + .expect("should always be able to construct this key"); + Keys { + prefix, + _phantom: std::marker::PhantomData, + } + } +} + +#[cfg(test)] +mod test { + use assert_matches::assert_matches; + use namada_core::ledger::eth_bridge::ADDRESS; + use namada_core::types::storage::DbKeySeg; + + use super::*; + + mod helpers { + use super::*; + + pub(super) fn arbitrary_event_with_hash() -> (EthereumEvent, String) { + ( + EthereumEvent::TransfersToNamada { + nonce: 1.into(), + transfers: vec![], + }, + "06799912C0FD8785EE29E13DFB84FE2778AF6D9CA026BD5B054F86CE9FE8C017" + .to_owned(), + ) + } + } + + #[test] + fn test_eth_msgs_prefix() { + assert_matches!(ð_msgs_prefix().segments[..], [ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(s), + ] if s == ETH_MSGS_PREFIX_KEY_SEGMENT) + } + + #[test] + fn test_ethereum_event_keys_all_keys() { + let (event, hash) = helpers::arbitrary_event_with_hash(); + let keys: Keys = (&event).into(); + let prefix = vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(ETH_MSGS_PREFIX_KEY_SEGMENT.to_owned()), + DbKeySeg::StringSeg(hash), + ]; + let body_key = keys.body(); + assert_eq!(body_key.segments[..3], prefix[..]); + assert_eq!( + body_key.segments[3], + DbKeySeg::StringSeg(BODY_KEY_SEGMENT.to_owned()) + ); + + let seen_key = keys.seen(); + assert_eq!(seen_key.segments[..3], prefix[..]); + assert_eq!( + seen_key.segments[3], + DbKeySeg::StringSeg(SEEN_KEY_SEGMENT.to_owned()) + ); + + let seen_by_key = keys.seen_by(); + assert_eq!(seen_by_key.segments[..3], prefix[..]); + assert_eq!( + seen_by_key.segments[3], + DbKeySeg::StringSeg(SEEN_BY_KEY_SEGMENT.to_owned()) + ); + + let voting_power_key = keys.voting_power(); + assert_eq!(voting_power_key.segments[..3], 
prefix[..]); + assert_eq!( + voting_power_key.segments[3], + DbKeySeg::StringSeg(VOTING_POWER_KEY_SEGMENT.to_owned()) + ); + } + + #[test] + fn test_ethereum_event_keys_into_iter() { + let (event, _) = helpers::arbitrary_event_with_hash(); + let keys: Keys = (&event).into(); + let as_keys: Vec<_> = keys.into_iter().collect(); + assert_eq!( + as_keys, + vec![ + keys.body(), + keys.seen(), + keys.seen_by(), + keys.voting_power(), + ] + ); + } + + #[test] + fn test_ethereum_event_keys_from_ethereum_event() { + let (event, hash) = helpers::arbitrary_event_with_hash(); + let keys: Keys = (&event).into(); + let expected = vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(ETH_MSGS_PREFIX_KEY_SEGMENT.to_owned()), + DbKeySeg::StringSeg(hash), + ]; + assert_eq!(&keys.prefix.segments[..], &expected[..]); + } + + #[test] + fn test_ethereum_event_keys_from_hash() { + let (event, hash) = helpers::arbitrary_event_with_hash(); + let keys: Keys = (&event.hash().unwrap()).into(); + let expected = vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(ETH_MSGS_PREFIX_KEY_SEGMENT.to_owned()), + DbKeySeg::StringSeg(hash), + ]; + assert_eq!(&keys.prefix.segments[..], &expected[..]); + } +} diff --git a/ethereum_bridge/src/vp.rs b/ethereum_bridge/src/vp.rs new file mode 100644 index 0000000000..c857498170 --- /dev/null +++ b/ethereum_bridge/src/vp.rs @@ -0,0 +1,28 @@ +use borsh::BorshSerialize; +use namada_core::ledger::storage::{self as ledger_storage, StorageHasher}; +use namada_core::types::address::nam; +use namada_core::types::token::{balance_key, Amount}; + +/// Initialize the storage owned by the Ethereum Bridge VP. +/// +/// This means that the amount of escrowed Nam is +/// initialized to 0. +pub fn init_storage(storage: &mut ledger_storage::Storage) +where + D: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: StorageHasher, +{ + let escrow_key = + balance_key(&nam(), &namada_core::ledger::eth_bridge::ADDRESS); + storage + .write( + &escrow_key, + Amount::default() + .try_to_vec() + .expect("Serializing an amount shouldn't fail."), + ) + .expect( + "Initializing the escrow balance of the Ethereum Bridge VP \ + shouldn't fail.", + ); +} diff --git a/genesis/dev.toml b/genesis/dev.toml index 3c8f67c3a1..ea02aabcfe 100644 --- a/genesis/dev.toml +++ b/genesis/dev.toml @@ -146,6 +146,8 @@ min_num_of_blocks = 10 max_expected_time_per_block = 30 # Expected epochs per year (also sets the minimum duration of an epoch in seconds) epochs_per_year = 525_600 +# Max payload size, in bytes, for a tx batch proposal. +max_proposal_bytes = 22020096 # Proof of stake parameters. [pos_params] diff --git a/genesis/e2e-tests-single-node.toml b/genesis/e2e-tests-single-node.toml index 554dfd57b1..249e55ef6d 100644 --- a/genesis/e2e-tests-single-node.toml +++ b/genesis/e2e-tests-single-node.toml @@ -151,6 +151,8 @@ filename = "vp_masp.wasm" min_num_of_blocks = 4 # Maximum expected time per block (in seconds). max_expected_time_per_block = 30 +# Max payload size, in bytes, for a tx batch proposal. 
+max_proposal_bytes = 22020096 # vp whitelist vp_whitelist = [] # tx whitelist diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 4bb96bbe49..ab2dea5b9e 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_macros" resolver = "2" -version = "0.12.0" +version = "0.12.1" [lib] proc-macro = true diff --git a/proof_of_stake/Cargo.toml b/proof_of_stake/Cargo.toml index 822ef2fabb..60fd3b8746 100644 --- a/proof_of_stake/Cargo.toml +++ b/proof_of_stake/Cargo.toml @@ -6,12 +6,17 @@ license = "GPL-3.0" name = "namada_proof_of_stake" readme = "../README.md" resolver = "2" -version = "0.12.0" +version = "0.12.1" [features] default = ["abciplus"] +abcipp = [ + "namada_core/abcipp", + "tendermint-proto-abcipp", +] abciplus = [ "namada_core/abciplus", + "tendermint-proto", ] # testing helpers testing = ["proptest"] @@ -20,11 +25,16 @@ testing = ["proptest"] namada_core = {path = "../core", default-features = false} borsh = "0.9.1" derivative = "2.2.0" +ferveo-common = {git = "https://github.com/anoma/ferveo", rev = "9e5e91c954158e7cff45c483fd06cd649a81553f"} # A fork with state machine testing proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm", optional = true} rust_decimal = { version = "1.26.1", features = ["borsh"] } rust_decimal_macros = "1.26.1" +tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} +tendermint-proto = {version = "0.23.6", optional = true} thiserror = "1.0.30" tracing = "0.1.30" [dev-dependencies] +# A fork with state machine testing +proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm"} \ No newline at end of file diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 64c7ffdff8..77b47cfc8a 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -15,6 +15,7 @@ pub mod btree_set; pub mod epoched; pub mod parameters; +pub mod pos_queries; pub mod storage; pub mod types; pub mod validation; @@ -34,12 +35,16 @@ use namada_core::types::storage::Epoch; use namada_core::types::token; pub use parameters::PosParams; use rust_decimal::Decimal; +#[cfg(not(feature = "abcipp"))] +pub use tendermint_proto; +#[cfg(feature = "abcipp")] +pub use tendermint_proto_abcipp as tendermint_proto; use thiserror::Error; use types::{ ActiveValidator, Bonds, CommissionRates, GenesisValidator, Slash, SlashType, Slashes, TotalDeltas, Unbond, Unbonds, ValidatorConsensusKeys, - ValidatorDeltas, ValidatorSet, ValidatorSetUpdate, ValidatorSets, - ValidatorState, ValidatorStates, + ValidatorDeltas, ValidatorEthKey, ValidatorSet, ValidatorSetUpdate, + ValidatorSets, ValidatorState, ValidatorStates, }; use crate::btree_set::BTreeSetShims; @@ -118,6 +123,18 @@ pub trait PosReadOnly { /// Read PoS total deltas for all validators (active and inactive) fn read_total_deltas(&self) -> Result; + /// Read PoS validator's Eth bridge governance key + fn read_validator_eth_cold_key( + &self, + key: &Address, + ) -> Option; + + /// Read PoS validator's Eth validator set update signing key + fn read_validator_eth_hot_key( + &self, + key: &Address, + ) -> Option; + /// Check if the given address is a validator by checking that it has some /// state. 
fn is_validator( @@ -233,6 +250,18 @@ pub trait PosActions: PosReadOnly { key: &Address, value: ValidatorConsensusKeys, ) -> Result<(), storage_api::Error>; + /// Write PoS validator's Eth bridge governance key + fn write_validator_eth_cold_key( + &mut self, + address: &Address, + value: ValidatorEthKey, + ) -> Result<(), storage_api::Error>; + /// Write PoS validator's Eth validator set update signing key + fn write_validator_eth_hot_key( + &mut self, + address: &Address, + value: ValidatorEthKey, + ) -> Result<(), storage_api::Error>; /// Write PoS validator's state. fn write_validator_state( &mut self, @@ -299,10 +328,13 @@ pub trait PosActions: PosReadOnly { ) -> Result<(), storage_api::Error>; /// Attempt to update the given account to become a validator. + #[allow(clippy::too_many_arguments)] fn become_validator( &mut self, address: &Address, consensus_key: &common::PublicKey, + eth_cold_key: &common::PublicKey, + eth_hot_key: &common::PublicKey, current_epoch: Epoch, commission_rate: Decimal, max_commission_rate_change: Decimal, @@ -318,6 +350,8 @@ pub trait PosActions: PosReadOnly { let consensus_key_clone = consensus_key.clone(); let BecomeValidatorData { consensus_key, + eth_cold_key, + eth_hot_key, state, deltas, commission_rate, @@ -326,12 +360,16 @@ pub trait PosActions: PosReadOnly { ¶ms, address, consensus_key, + eth_cold_key, + eth_hot_key, &mut validator_set, current_epoch, commission_rate, max_commission_rate_change, ); self.write_validator_consensus_key(address, consensus_key)?; + self.write_validator_eth_cold_key(address, eth_cold_key)?; + self.write_validator_eth_hot_key(address, eth_hot_key)?; self.write_validator_state(address, state)?; self.write_validator_set(validator_set)?; self.write_validator_address_raw_hash(address, &consensus_key_clone)?; @@ -643,6 +681,17 @@ pub trait PosBase { &self, key: &Address, ) -> Decimal; + /// Read PoS validator's Eth bridge governance key + fn read_validator_eth_cold_key( + &self, + key: &Address, + ) -> Option; + + /// Read PoS validator's Eth validator set update signing key + fn read_validator_eth_hot_key( + &self, + key: &Address, + ) -> Option; /// Read PoS validator set (active and inactive). fn read_validator_set(&self) -> ValidatorSets; /// Read PoS total deltas of all validators (active and inactive). @@ -687,6 +736,18 @@ pub trait PosBase { fn write_validator_slash(&mut self, validator: &Address, value: Slash); /// Write PoS bond (validator self-bond or a delegation). fn write_bond(&mut self, key: &BondId, value: &Bonds); + /// Write PoS validator's Eth bridge governance key + fn write_validator_eth_cold_key( + &mut self, + address: &Address, + value: &ValidatorEthKey, + ); + /// Write PoS validator's Eth validator set update signing key + fn write_validator_eth_hot_key( + &mut self, + address: &Address, + value: &ValidatorEthKey, + ); /// Write PoS validator set (active and inactive). 
fn write_validator_set(&mut self, value: &ValidatorSets); /// Write total deltas in PoS for all validators (active and inactive) @@ -736,6 +797,8 @@ pub trait PosBase { state, deltas, bond: (bond_id, bond), + eth_cold_key, + eth_hot_key, } = res?; self.write_validator_address_raw_hash( address, @@ -744,6 +807,8 @@ pub trait PosBase { .expect("Consensus key must be set"), ); self.write_validator_consensus_key(address, &consensus_key); + self.write_validator_eth_cold_key(address, ð_cold_key); + self.write_validator_eth_hot_key(address, ð_hot_key); self.write_validator_state(address, &state); self.write_validator_deltas(address, &deltas); self.write_bond(&bond_id, &bond); @@ -945,6 +1010,8 @@ pub trait PosBase { pub enum GenesisError { #[error("Voting power overflow: {0}")] VotingPowerOverflow(TryFromIntError), + #[error("Ethereum address can only be of secp kind")] + SecpKeyConversion, } #[allow(missing_docs)] @@ -952,6 +1019,8 @@ pub enum GenesisError { pub enum BecomeValidatorError { #[error("The given address {0} is already a validator")] AlreadyValidator(Address), + #[error("Ethereum address can only be of secp kind")] + SecpKeyConversion, } #[allow(missing_docs)] @@ -1050,6 +1119,8 @@ struct GenesisValidatorData { state: ValidatorStates, deltas: ValidatorDeltas, bond: (BondId, Bonds), + eth_cold_key: ValidatorEthKey, + eth_hot_key: ValidatorEthKey, } /// A function that returns genesis data created from the initial validator set. @@ -1103,11 +1174,17 @@ fn init_genesis<'a>( address, tokens, consensus_key, + eth_cold_key, + eth_hot_key, commission_rate, max_commission_rate_change, }| { let consensus_key = Epoched::init_at_genesis(consensus_key.clone(), current_epoch); + let eth_cold_key = + Epoched::init_at_genesis(eth_cold_key.clone(), current_epoch); + let eth_hot_key = + Epoched::init_at_genesis(eth_hot_key.clone(), current_epoch); let commission_rate = Epoched::init_at_genesis(*commission_rate, current_epoch); let state = Epoched::init_at_genesis( @@ -1138,6 +1215,8 @@ fn init_genesis<'a>( state, deltas, bond: (bond_id, bond), + eth_cold_key, + eth_hot_key, }) }, ); @@ -1212,6 +1291,8 @@ fn slash( struct BecomeValidatorData { consensus_key: ValidatorConsensusKeys, + eth_cold_key: ValidatorEthKey, + eth_hot_key: ValidatorEthKey, state: ValidatorStates, deltas: ValidatorDeltas, commission_rate: Decimal, @@ -1219,10 +1300,13 @@ struct BecomeValidatorData { } /// A function that initialized data for a new validator. +#[allow(clippy::too_many_arguments)] fn become_validator( params: &PosParams, address: &Address, consensus_key: &common::PublicKey, + eth_cold_key: &common::PublicKey, + eth_hot_key: &common::PublicKey, validator_set: &mut ValidatorSets, current_epoch: Epoch, commission_rate: Decimal, @@ -1230,6 +1314,9 @@ fn become_validator( ) -> BecomeValidatorData { let consensus_key = Epoched::init(consensus_key.clone(), current_epoch, params); + let eth_cold_key = + Epoched::init(eth_cold_key.clone(), current_epoch, params); + let eth_hot_key = Epoched::init(eth_hot_key.clone(), current_epoch, params); let mut state = Epoched::init_at_genesis(ValidatorState::Pending, current_epoch); @@ -1262,6 +1349,8 @@ fn become_validator( BecomeValidatorData { consensus_key, + eth_cold_key, + eth_hot_key, state, deltas, commission_rate, diff --git a/proof_of_stake/src/pos_queries.rs b/proof_of_stake/src/pos_queries.rs new file mode 100644 index 0000000000..5f01cb4af8 --- /dev/null +++ b/proof_of_stake/src/pos_queries.rs @@ -0,0 +1,437 @@ +//! 
Storage API for querying Proof-of-Stake related
+//! data, such as validator and epoch data.
+use std::collections::BTreeSet;
+
+use borsh::{BorshDeserialize, BorshSerialize};
+use ferveo_common::TendermintValidator;
+use namada_core::ledger::parameters::storage::get_max_proposal_bytes_key;
+use namada_core::ledger::parameters::EpochDuration;
+use namada_core::ledger::storage::types::decode;
+use namada_core::ledger::storage::Storage;
+use namada_core::ledger::{storage, storage_api};
+use namada_core::types::address::Address;
+use namada_core::types::chain::ProposalBytes;
+use namada_core::types::ethereum_events::EthAddress;
+use namada_core::types::key::dkg_session_keys::DkgPublicKey;
+use namada_core::types::storage::{BlockHeight, Epoch};
+use namada_core::types::transaction::EllipticCurve;
+use namada_core::types::vote_extensions::validator_set_update::EthAddrBook;
+use namada_core::types::{key, token};
+use thiserror::Error;
+
+use crate::tendermint_proto::google::protobuf;
+use crate::tendermint_proto::types::EvidenceParams;
+use crate::types::WeightedValidator;
+use crate::{PosBase, PosParams};
+
+/// Errors returned by [`PosQueries`] operations.
+#[derive(Error, Debug)]
+pub enum Error {
+    /// The given address is not among the set of active validators for
+    /// the corresponding epoch.
+    #[error(
+        "The address '{0:?}' is not among the active validator set for epoch \
+         {1}"
+    )]
+    NotValidatorAddress(Address, Epoch),
+    /// The given public key does not correspond to any active validator's
+    /// key at the provided epoch.
+    #[error(
+        "The public key '{0}' is not among the active validator set for epoch \
+         {1}"
+    )]
+    NotValidatorKey(String, Epoch),
+    /// The given public key hash does not correspond to any active validator's
+    /// key at the provided epoch.
+    #[error(
+        "The public key hash '{0}' is not among the active validator set for \
+         epoch {1}"
+    )]
+    NotValidatorKeyHash(String, Epoch),
+    /// An invalid Tendermint validator address was detected.
+    #[error("Invalid validator tendermint address")]
+    InvalidTMAddress,
+}
+
+/// Result type returned by [`PosQueries`] operations.
+pub type Result<T> = ::std::result::Result<T, Error>;
+
+/// This enum is used as a parameter to
+/// [`PosQueries::can_send_validator_set_update`].
+pub enum SendValsetUpd {
+    /// Check if it is possible to send a validator set update
+    /// vote extension at the current block height.
+    Now,
+    /// Check if it is possible to send a validator set update
+    /// vote extension at the previous block height.
+    AtPrevHeight,
+}
+
+/// Methods used to query blockchain proof-of-stake related state,
+/// such as the currently active set of validators.
+pub trait PosQueries {
+    /// Get the set of active validators for a given epoch (defaulting to the
+    /// epoch of the current yet-to-be-committed block).
+    fn get_active_validators(
+        &self,
+        epoch: Option<Epoch>,
+    ) -> BTreeSet<WeightedValidator>;
+
+    /// Look up the total voting power for an epoch (defaulting to the
+    /// epoch of the current yet-to-be-committed block).
+    fn get_total_voting_power(&self, epoch: Option<Epoch>) -> token::Amount;
+    /// Simple helper function for the ledger to get balances
+    /// of the specified token at the specified address.
+    fn get_balance(&self, token: &Address, owner: &Address) -> token::Amount;
+
+    /// Return evidence parameters.
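+    ///
+    /// (The returned parameters are derived from the PoS unbonding length
+    /// and the epoch duration, so that evidence of misbehaviour remains
+    /// valid roughly as long as the offending stake could still be bonded.)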
+    fn get_evidence_params(
+        &self,
+        epoch_duration: &EpochDuration,
+        pos_params: &PosParams,
+    ) -> EvidenceParams;
+
+    /// Look up data about a validator from their protocol signing key.
+    fn get_validator_from_protocol_pk(
+        &self,
+        pk: &key::common::PublicKey,
+        epoch: Option<Epoch>,
+    ) -> Result<TendermintValidator<EllipticCurve>>;
+
+    /// Look up data about a validator from their address.
+    fn get_validator_from_address(
+        &self,
+        address: &Address,
+        epoch: Option<Epoch>,
+    ) -> Result<(token::Amount, key::common::PublicKey)>;
+
+    /// Given a tendermint validator, the address is the hash
+    /// of the validator's public key. We look up the native
+    /// address from storage using this hash.
+    // TODO: We may change how this lookup is done, see
+    // https://github.com/anoma/namada/issues/200
+    fn get_validator_from_tm_address(
+        &self,
+        tm_address: &[u8],
+        epoch: Option<Epoch>,
+    ) -> Result<Address>
;
+
+    /// Determines if it is possible to send a validator set update vote
+    /// extension at the provided [`BlockHeight`] in [`SendValsetUpd`].
+    fn can_send_validator_set_update(&self, can_send: SendValsetUpd) -> bool;
+
+    /// Check if we are at a given [`BlockHeight`] offset, `height_offset`,
+    /// within the current [`Epoch`].
+    fn is_deciding_offset_within_epoch(&self, height_offset: u64) -> bool;
+
+    /// Given some [`BlockHeight`], return the corresponding [`Epoch`].
+    fn get_epoch(&self, height: BlockHeight) -> Option<Epoch>;
+
+    /// Retrieves the [`BlockHeight`] that is currently being decided.
+    fn get_current_decision_height(&self) -> BlockHeight;
+
+    /// For a given Namada validator, return its corresponding Ethereum bridge
+    /// address.
+    fn get_ethbridge_from_namada_addr(
+        &self,
+        validator: &Address,
+        epoch: Option<Epoch>,
+    ) -> Option<EthAddress>;
+
+    /// For a given Namada validator, return its corresponding Ethereum
+    /// governance address.
+    fn get_ethgov_from_namada_addr(
+        &self,
+        validator: &Address,
+        epoch: Option<Epoch>,
+    ) -> Option<EthAddress>;
+
+    /// Extension of [`Self::get_active_validators`], which additionally returns
+    /// all Ethereum addresses of some validator.
+    fn get_active_eth_addresses<'db>(
+        &'db self,
+        epoch: Option<Epoch>,
+    ) -> Box<dyn Iterator<Item = (EthAddrBook, Address, token::Amount)> + 'db>;
+
+    /// Retrieve the `max_proposal_bytes` consensus parameter from storage.
+    fn get_max_proposal_bytes(&self) -> ProposalBytes;
+}
+
+impl<D, H> PosQueries for Storage<D, H>
+where
+    D: storage::DB + for<'iter> storage::DBIter<'iter>,
+    H: storage::StorageHasher,
+{
+    fn get_active_validators(
+        &self,
+        epoch: Option<Epoch>,
+    ) -> BTreeSet<WeightedValidator> {
+        let epoch = epoch.unwrap_or_else(|| self.get_current_epoch().0);
+        let validator_set = self.read_validator_set();
+        validator_set
+            .get(epoch)
+            .expect("Validators for an epoch should be known")
+            .active
+            .clone()
+    }
+
+    fn get_total_voting_power(&self, epoch: Option<Epoch>) -> token::Amount {
+        self.get_active_validators(epoch)
+            .iter()
+            .map(|validator| validator.bonded_stake)
+            .sum::<u64>()
+            .into()
+    }
+
+    fn get_balance(&self, token: &Address, owner: &Address) -> token::Amount {
+        let balance = storage_api::StorageRead::read(
+            self,
+            &token::balance_key(token, owner),
+        );
+        // Storage read must not fail, but there might be no value, in which
+        // case default (0) is returned
+        balance
+            .expect("Storage read in the protocol must not fail")
+            .unwrap_or_default()
+    }
+
+    fn get_evidence_params(
+        &self,
+        epoch_duration: &EpochDuration,
+        pos_params: &PosParams,
+    ) -> EvidenceParams {
+        // Minimum number of epochs before tokens are unbonded and can be
+        // withdrawn
+        let len_before_unbonded =
+            std::cmp::max(pos_params.unbonding_len as i64 - 1, 0);
+        let max_age_num_blocks: i64 =
+            epoch_duration.min_num_of_blocks as i64 * len_before_unbonded;
+        let min_duration_secs = epoch_duration.min_duration.0 as i64;
+        let max_age_duration = Some(protobuf::Duration {
+            seconds: min_duration_secs * len_before_unbonded,
+            nanos: 0,
+        });
+        EvidenceParams {
+            max_age_num_blocks,
+            max_age_duration,
+            ..EvidenceParams::default()
+        }
+    }
+
+    fn get_validator_from_protocol_pk(
+        &self,
+        pk: &key::common::PublicKey,
+        epoch: Option<Epoch>,
+    ) -> Result<TendermintValidator<EllipticCurve>> {
+        let pk_bytes = pk
+            .try_to_vec()
+            .expect("Serializing public key should not fail");
+        let epoch = epoch.unwrap_or_else(|| self.get_current_epoch().0);
+        self.get_active_validators(Some(epoch))
+            .into_iter()
+            .find(|validator| {
+                let pk_key = key::protocol_pk_key(&validator.address);
+                match self.read(&pk_key) {
+                    Ok((Some(bytes), _)) => bytes == pk_bytes,
+                    _ => false,
+                }
+            })
+            .map(|validator| {
+                let dkg_key =
+                    key::dkg_session_keys::dkg_pk_key(&validator.address);
+                let bytes = self
+                    .read(&dkg_key)
+                    .expect("Validator should have public dkg key")
+                    .0
+                    .expect("Validator should have public dkg key");
+                let dkg_publickey =
+                    &<DkgPublicKey as BorshDeserialize>::deserialize(
+                        &mut bytes.as_ref(),
+                    )
+                    .expect(
+                        "DKG public key in storage should be deserializable",
+                    );
+                TendermintValidator {
+                    power: validator.bonded_stake,
+                    address: validator.address.to_string(),
+                    public_key: dkg_publickey.into(),
+                }
+            })
+            .ok_or_else(|| Error::NotValidatorKey(pk.to_string(), epoch))
+    }
+
+    fn get_validator_from_address(
+        &self,
+        address: &Address,
+        epoch: Option<Epoch>,
+    ) -> Result<(token::Amount, key::common::PublicKey)> {
+        let epoch = epoch.unwrap_or_else(|| self.get_current_epoch().0);
+        self.get_active_validators(Some(epoch))
+            .into_iter()
+            .find(|validator| address == &validator.address)
+            .map(|validator| {
+                let protocol_pk_key = key::protocol_pk_key(&validator.address);
+                let bytes = self
+                    .read(&protocol_pk_key)
+                    .expect("Validator should have public protocol key")
+                    .0
+                    .expect("Validator should have public protocol key");
+                let protocol_pk: key::common::PublicKey =
+                    BorshDeserialize::deserialize(&mut bytes.as_ref()).expect(
+                        "Protocol public key in storage should be \
+                         deserializable",
+                    );
+                (validator.bonded_stake.into(), protocol_pk)
+            })
+            .ok_or_else(|| Error::NotValidatorAddress(address.clone(), epoch))
+    }
+
+    fn get_validator_from_tm_address(
+        &self,
+        tm_address: &[u8],
+        epoch: Option<Epoch>,
+    ) -> Result<Address>
{
+        let epoch = epoch.unwrap_or_else(|| self.get_current_epoch().0);
+        let validator_raw_hash = core::str::from_utf8(tm_address)
+            .map_err(|_| Error::InvalidTMAddress)?;
+        self.read_validator_address_raw_hash(validator_raw_hash)
+            .ok_or_else(|| {
+                Error::NotValidatorKeyHash(
+                    validator_raw_hash.to_string(),
+                    epoch,
+                )
+            })
+    }
+
+    #[cfg(feature = "abcipp")]
+    #[inline]
+    fn can_send_validator_set_update(&self, _can_send: SendValsetUpd) -> bool {
+        // TODO: implement this method for ABCI++; should only be able to send
+        // a validator set update at the second block of an epoch
+        false
+    }
+
+    #[cfg(not(feature = "abcipp"))]
+    #[inline]
+    fn can_send_validator_set_update(&self, can_send: SendValsetUpd) -> bool {
+        if matches!(can_send, SendValsetUpd::AtPrevHeight) {
+            // when checking vote extensions in Prepare
+            // and ProcessProposal, we simply return true
+            true
+        } else {
+            // offset of 1 => are we at the 2nd
+            // block within the epoch?
+            self.is_deciding_offset_within_epoch(1)
+        }
+    }
+
+    fn is_deciding_offset_within_epoch(&self, height_offset: u64) -> bool {
+        let current_decision_height = self.get_current_decision_height();
+
+        // NOTE: the first stored height in `fst_block_heights_of_each_epoch`
+        // is 0, because of a bug (should be 1), so this code needs to
+        // handle that case
+        //
+        // we can remove this check once that's fixed
+        if self.get_current_epoch().0 == Epoch(0) {
+            let height_offset_within_epoch = BlockHeight(1 + height_offset);
+            return current_decision_height == height_offset_within_epoch;
+        }
+
+        let fst_heights_of_each_epoch =
+            self.block.pred_epochs.first_block_heights();
+
+        fst_heights_of_each_epoch
+            .last()
+            .map(|&h| {
+                let height_offset_within_epoch = h + height_offset;
+                current_decision_height == height_offset_within_epoch
+            })
+            .unwrap_or(false)
+    }
+
+    #[inline]
+    fn get_epoch(&self, height: BlockHeight) -> Option<Epoch> {
+        self.block.pred_epochs.get_epoch(height)
+    }
+
+    #[inline]
+    fn get_current_decision_height(&self) -> BlockHeight {
+        self.last_height + 1
+    }
+
+    #[inline]
+    fn get_ethbridge_from_namada_addr(
+        &self,
+        validator: &Address,
+        epoch: Option<Epoch>,
+    ) -> Option<EthAddress> {
+        let epoch = epoch.unwrap_or_else(|| self.get_current_epoch().0);
+        self.read_validator_eth_hot_key(validator)
+            .as_ref()
+            .and_then(|epk| epk.get(epoch).and_then(|pk| pk.try_into().ok()))
+    }
+
+    #[inline]
+    fn get_ethgov_from_namada_addr(
+        &self,
+        validator: &Address,
+        epoch: Option<Epoch>,
+    ) -> Option<EthAddress> {
+        let epoch = epoch.unwrap_or_else(|| self.get_current_epoch().0);
+        self.read_validator_eth_cold_key(validator)
+            .as_ref()
+            .and_then(|epk| epk.get(epoch).and_then(|pk| pk.try_into().ok()))
+    }
+
+    #[inline]
+    fn get_active_eth_addresses<'db>(
+        &'db self,
+        epoch: Option<Epoch>,
+    ) -> Box<dyn Iterator<Item = (EthAddrBook, Address, token::Amount)> + 'db>
+    {
+        let epoch = epoch.unwrap_or_else(|| self.get_current_epoch().0);
+        Box::new(self.get_active_validators(Some(epoch)).into_iter().map(
+            move |validator| {
+                let hot_key_addr = self
+                    .get_ethbridge_from_namada_addr(
+                        &validator.address,
+                        Some(epoch),
+                    )
+                    .expect(
+                        "All Namada validators should have an Ethereum bridge \
+                         key",
+                    );
+                let cold_key_addr = self
+                    .get_ethgov_from_namada_addr(
+                        &validator.address,
+                        Some(epoch),
+                    )
+                    .expect(
+                        "All Namada validators should have an Ethereum \
+                         governance key",
+                    );
+                let eth_addr_book = EthAddrBook {
+                    hot_key_addr,
+                    cold_key_addr,
+                };
+                (
+                    eth_addr_book,
+                    validator.address,
+                    validator.bonded_stake.into(),
+                )
+            },
+        ))
+    }
+
+    fn get_max_proposal_bytes(&self) -> ProposalBytes {
+        let key = get_max_proposal_bytes_key();
+ let (maybe_value, _gas) = self + .read(&key) + .expect("Must be able to read ProposalBytes from storage"); + let value = + maybe_value.expect("ProposalBytes must be present in storage"); + decode(value).expect("Must be able to decode ProposalBytes in storage") + } +} diff --git a/proof_of_stake/src/storage.rs b/proof_of_stake/src/storage.rs index 5e11165c55..0df7ab4e12 100644 --- a/proof_of_stake/src/storage.rs +++ b/proof_of_stake/src/storage.rs @@ -16,6 +16,8 @@ const PARAMS_STORAGE_KEY: &str = "params"; const VALIDATOR_STORAGE_PREFIX: &str = "validator"; const VALIDATOR_ADDRESS_RAW_HASH: &str = "address_raw_hash"; const VALIDATOR_CONSENSUS_KEY_STORAGE_KEY: &str = "consensus_key"; +const VALIDATOR_ETH_COLD_KEY_STORAGE_KEY: &str = "eth_cold_key"; +const VALIDATOR_ETH_HOT_KEY_STORAGE_KEY: &str = "eth_hot_key"; const VALIDATOR_STATE_STORAGE_KEY: &str = "state"; const VALIDATOR_DELTAS_STORAGE_KEY: &str = "validator_deltas"; const VALIDATOR_COMMISSION_RATE_STORAGE_KEY: &str = "commission_rate"; @@ -106,6 +108,56 @@ pub fn is_validator_consensus_key_key(key: &Key) -> Option<&Address> { } } +/// Storage key for validator's eth cold key. +pub fn validator_eth_cold_key_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_ETH_COLD_KEY_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for validator's eth cold key? +pub fn is_validator_eth_cold_key_key(key: &Key) -> Option<&Address> { + match &key.segments[..] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(key), + ] if addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && key == VALIDATOR_ETH_COLD_KEY_STORAGE_KEY => + { + Some(validator) + } + _ => None, + } +} + +/// Storage key for validator's eth hot key. +pub fn validator_eth_hot_key_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_ETH_HOT_KEY_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for validator's eth hot key? +pub fn is_validator_eth_hot_key_key(key: &Key) -> Option<&Address> { + match &key.segments[..] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(key), + ] if addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && key == VALIDATOR_ETH_HOT_KEY_STORAGE_KEY => + { + Some(validator) + } + _ => None, + } +} + /// Storage key for validator's commission rate. pub fn validator_commission_rate_key(validator: &Address) -> Key { validator_prefix(validator) @@ -158,7 +210,7 @@ pub fn is_validator_max_commission_rate_change_key( } } -/// Storage key for validator's consensus key. +/// Storage key for validator's state. 
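+/// (Illustrative layout, following the other validator sub-keys in this
+/// module: `{PoS address}/validator/{validator address}/state`.)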
pub fn validator_state_key(validator: &Address) -> Key { validator_prefix(validator) .push(&VALIDATOR_STATE_STORAGE_KEY.to_owned()) @@ -437,6 +489,23 @@ where decode(value.unwrap()).unwrap() } + fn read_validator_eth_cold_key( + &self, + key: &Address, + ) -> Option { + let (value, _gas) = + self.read(&validator_eth_cold_key_key(key)).unwrap(); + value.map(|value| decode(value).unwrap()) + } + + fn read_validator_eth_hot_key( + &self, + key: &Address, + ) -> Option { + let (value, _gas) = self.read(&validator_eth_hot_key_key(key)).unwrap(); + value.map(|value| decode(value).unwrap()) + } + fn write_pos_params(&mut self, params: &PosParams) { self.write(¶ms_key(), encode(params)).unwrap(); } @@ -518,6 +587,24 @@ where self.write(&validator_set_key(), encode(value)).unwrap(); } + fn write_validator_eth_cold_key( + &mut self, + address: &Address, + value: &types::ValidatorEthKey, + ) { + self.write(&validator_eth_cold_key_key(address), encode(value)) + .unwrap(); + } + + fn write_validator_eth_hot_key( + &mut self, + address: &Address, + value: &types::ValidatorEthKey, + ) { + self.write(&validator_eth_hot_key_key(address), encode(value)) + .unwrap(); + } + fn write_total_deltas(&mut self, value: &TotalDeltas) { self.write(&total_deltas_key(), encode(value)).unwrap(); } @@ -705,6 +792,26 @@ macro_rules! impl_pos_read_only { namada_core::ledger::storage_api::StorageRead::read_bytes(self, &total_deltas_key())?.unwrap(); Ok(namada_core::ledger::storage::types::decode(value).unwrap()) } + + // TODO: return result + fn read_validator_eth_cold_key( + &self, + key: &Address, + ) -> Option { + let value = + namada_core::ledger::storage_api::StorageRead::read_bytes(self, &validator_eth_cold_key_key(key)).unwrap().unwrap(); + Some(namada_core::ledger::storage::types::decode(value).unwrap()) + } + + // TODO: return result + fn read_validator_eth_hot_key( + &self, + key: &Address, + ) -> Option { + let value = + namada_core::ledger::storage_api::StorageRead::read_bytes(self, &validator_eth_hot_key_key(key)).unwrap().unwrap(); + Some(namada_core::ledger::storage::types::decode(value).unwrap()) + } } } } diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index 089da88c86..e4ac6be425 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -25,6 +25,8 @@ pub type ValidatorConsensusKeys = Epoched; pub type ValidatorStates = Epoched; /// Epoched validator's total deltas. pub type ValidatorDeltas = EpochedDelta; +/// Epoched validator's eth key. +pub type ValidatorEthKey = Epoched; /// Epoched bond. 
pub type Bonds = EpochedDelta; @@ -56,6 +58,11 @@ pub struct GenesisValidator { pub tokens: token::Amount, /// A public key used for signing validator's consensus actions pub consensus_key: common::PublicKey, + /// An Eth bridge governance public key + pub eth_cold_key: common::PublicKey, + /// An Eth bridge hot signing public key used for validator set updates and + /// cross-chain transactions + pub eth_hot_key: common::PublicKey, /// Commission rate charged on rewards for delegators (bounded inside 0-1) pub commission_rate: Decimal, /// Maximum change in commission rate permitted per epoch diff --git a/scripts/unwrap_e2e_log.py b/scripts/unwrap_e2e_log.py new file mode 100755 index 0000000000..a468a35acb --- /dev/null +++ b/scripts/unwrap_e2e_log.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 + +# this script takes `expectrl` log outputs, such as the ones emitted by +# e2e tests, and unwraps them into a more readable format + +import re +import sys + +UNICODE = re.compile(r'\\u{([\da-fA-F]+)}') + +def main(): + if len(sys.argv) > 1: + with open(sys.argv[1], 'r') as f: + process_file(f) + else: + process_file(sys.stdin) + +def process_file(f): + for line in f.readlines(): + process_line(line) + sys.stdout.flush() + +def process_line(line): + for m in UNICODE.findall(line): + line = line.replace(f'\\u{{{m}}}', f'\\u{int(m, 16):04x}') + line = \ + try_parse_line_str(line) or \ + try_parse_line_bytes(line) or \ + '' + sys.stdout.write(line) + +def try_parse_line_str(line): + prefix_full = 'read: "' + prefix = prefix_full[:-1] + if line.startswith(prefix_full): + return eval(line[len(prefix):]) + +def try_parse_line_bytes(line): + prefix = 'read:(bytes): ' + if line.startswith(prefix): + return bytes(eval(line[len(prefix):])).decode("utf-8", "backslashreplace") + +if __name__ == '__main__': + main() diff --git a/shared/Cargo.toml b/shared/Cargo.toml index efcd0e81a0..ad91afd3cf 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada" resolver = "2" -version = "0.12.0" +version = "0.12.1" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -47,13 +47,16 @@ abcipp = [ "ibc-proto-abcipp", "ibc-abcipp", "tendermint-abcipp", + "namada_core/tendermint-abcipp", "tendermint-proto-abcipp", # it's OK to include the tendermint-rpc feature here, as we aren't currently building wasms with `abcipp` "tendermint-rpc-abcipp", + "namada_ethereum_bridge/abcipp", ] abciplus = [ "namada_core/abciplus", "namada_proof_of_stake/abciplus", + "namada_ethereum_bridge/abciplus", "ibc", "ibc-proto", "tendermint", @@ -73,12 +76,15 @@ testing = [ "namada_proof_of_stake/testing", "async-client", "proptest", + "rand_core", + "rand", "tempfile", ] [dependencies] namada_core = {path = "../core", default-features = false, features = ["secp256k1-sign-verify"]} namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} +namada_ethereum_bridge = {path = "../ethereum_bridge", default-features = false} async-trait = {version = "0.1.51", optional = true} bellman = "0.11.2" bls12_381 = "0.6.1" @@ -88,6 +94,8 @@ circular-queue = "0.2.6" clru = {git = "https://github.com/marmeladema/clru-rs.git", rev = "71ca566"} data-encoding = "2.3.2" derivative = "2.2.0" +eyre = "0.6.8" +ferveo-common = {git = "https://github.com/anoma/ferveo", rev = "9e5e91c954158e7cff45c483fd06cd649a81553f"} # TODO using the same version of tendermint-rs as we do here. 
ibc-abcipp = {package = "ibc", git = "https://github.com/heliaxdev/ibc-rs", rev = "9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d", default-features = false, optional = true} ibc-proto-abcipp = {package = "ibc-proto", git = "https://github.com/heliaxdev/ibc-rs", rev = "9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d", default-features = false, optional = true} @@ -101,8 +109,11 @@ paste = "1.0.9" proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm", optional = true} prost = "0.9.0" pwasm-utils = {git = "https://github.com/heliaxdev/wasm-utils", tag = "v0.20.0", features = ["sign_ext"], optional = true} +rand = {version = "0.8", optional = true} +rand_core = {version = "0.6", optional = true} rayon = {version = "=1.5.3", optional = true} rust_decimal = "1.26.1" +serde = {version = "1.0.125", features = ["derive"]} serde_json = "1.0.62" sha2 = "0.9.3" # We switch off "blake2b" because it cannot be compiled to wasm diff --git a/shared/src/ledger/eth_bridge.rs b/shared/src/ledger/eth_bridge.rs new file mode 100644 index 0000000000..1aa9e4d7f7 --- /dev/null +++ b/shared/src/ledger/eth_bridge.rs @@ -0,0 +1,4 @@ +//! Re-exporting types from the namada_ethereum_bridge crate. +pub use namada_core::ledger::eth_bridge::storage::wrapped_erc20s; +pub use namada_core::ledger::eth_bridge::{ADDRESS, INTERNAL_ADDRESS}; +pub use namada_ethereum_bridge::parameters::*; diff --git a/shared/src/ledger/eth_bridge/mod.rs b/shared/src/ledger/eth_bridge/mod.rs deleted file mode 100644 index ff8505b08e..0000000000 --- a/shared/src/ledger/eth_bridge/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! Bridge from Ethereum - -pub mod storage; -pub mod vp; diff --git a/shared/src/ledger/eth_bridge/storage.rs b/shared/src/ledger/eth_bridge/storage.rs deleted file mode 100644 index e67abf921c..0000000000 --- a/shared/src/ledger/eth_bridge/storage.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! storage helpers -use super::vp::ADDRESS; -use crate::types::storage::{Key, KeySeg}; - -const QUEUE_STORAGE_KEY: &str = "queue"; - -/// Get the key corresponding to @EthBridge/queue -pub fn queue_key() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&QUEUE_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} diff --git a/shared/src/ledger/eth_bridge/vp.rs b/shared/src/ledger/eth_bridge/vp.rs deleted file mode 100644 index 479541e181..0000000000 --- a/shared/src/ledger/eth_bridge/vp.rs +++ /dev/null @@ -1,57 +0,0 @@ -//! Validity predicate for the Ethereum bridge - -use std::collections::BTreeSet; - -use crate::ledger::native_vp::{Ctx, NativeVp}; -use crate::ledger::storage as ledger_storage; -use crate::ledger::storage::StorageHasher; -use crate::types::address::{Address, InternalAddress}; -use crate::types::storage::Key; -use crate::vm::WasmCacheAccess; - -/// Internal address for the Ethereum bridge VP -pub const ADDRESS: Address = Address::Internal(InternalAddress::EthBridge); - -/// Validity predicate for the Ethereum bridge -pub struct EthBridge<'ctx, DB, H, CA> -where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: StorageHasher, - CA: 'static + WasmCacheAccess, -{ - /// Context to interact with the host structures. 
- pub ctx: Ctx<'ctx, DB, H, CA>, -} - -#[allow(missing_docs)] -#[derive(thiserror::Error, Debug)] -pub enum Error { - #[error("Internal error")] - Internal, -} - -impl<'a, DB, H, CA> NativeVp for EthBridge<'a, DB, H, CA> -where - DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: 'static + StorageHasher, - CA: 'static + WasmCacheAccess, -{ - type Error = Error; - - const ADDR: InternalAddress = InternalAddress::EthBridge; - - fn validate_tx( - &self, - _tx_data: &[u8], - _keys_changed: &BTreeSet, - _verifiers: &BTreeSet
, - ) -> Result { - tracing::debug!( - tx_data_len = _tx_data.len(), - keys_changed_len = _keys_changed.len(), - verifiers_len = _verifiers.len(), - "Validity predicate triggered", - ); - Ok(false) - } -} diff --git a/shared/src/ledger/ibc/mod.rs b/shared/src/ledger/ibc/mod.rs index 6cf1d6c9f1..a848893e97 100644 --- a/shared/src/ledger/ibc/mod.rs +++ b/shared/src/ledger/ibc/mod.rs @@ -8,7 +8,8 @@ use namada_core::ledger::ibc::storage::{ connection_counter_key, }; -use crate::ledger::storage::{self as ledger_storage, Storage, StorageHasher}; +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{self as ledger_storage, Storage}; /// Initialize storage in the genesis block. pub fn init_genesis_storage(storage: &mut Storage) diff --git a/shared/src/ledger/ibc/vp/client.rs b/shared/src/ledger/ibc/vp/client.rs index 40807673f1..7fd895289c 100644 --- a/shared/src/ledger/ibc/vp/client.rs +++ b/shared/src/ledger/ibc/vp/client.rs @@ -32,7 +32,8 @@ use crate::ibc::core::ics23_commitment::commitment::CommitmentRoot; use crate::ibc::core::ics24_host::identifier::ClientId; use crate::ibc::core::ics26_routing::msgs::Ics26Envelope; use crate::ledger::native_vp::VpEnv; -use crate::ledger::storage::{self, StorageHasher}; +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{self}; use crate::tendermint_proto::Protobuf; use crate::types::ibc::data::{Error as IbcDataError, IbcMessage}; use crate::types::storage::{BlockHeight, Key}; diff --git a/shared/src/ledger/ibc/vp/connection.rs b/shared/src/ledger/ibc/vp/connection.rs index 5f8df9a8a8..8e2f804c39 100644 --- a/shared/src/ledger/ibc/vp/connection.rs +++ b/shared/src/ledger/ibc/vp/connection.rs @@ -28,7 +28,8 @@ use crate::ibc::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTr use crate::ibc::core::ics23_commitment::commitment::CommitmentPrefix; use crate::ibc::core::ics24_host::identifier::{ClientId, ConnectionId}; use crate::ledger::native_vp::VpEnv; -use crate::ledger::storage::{self, StorageHasher}; +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{self}; use crate::tendermint_proto::Protobuf; use crate::types::ibc::data::{Error as IbcDataError, IbcMessage}; use crate::types::storage::{BlockHeight, Epoch, Key}; diff --git a/shared/src/ledger/ibc/vp/mod.rs b/shared/src/ledger/ibc/vp/mod.rs index 8a807ce754..6de5fe37ec 100644 --- a/shared/src/ledger/ibc/vp/mod.rs +++ b/shared/src/ledger/ibc/vp/mod.rs @@ -359,16 +359,6 @@ mod tests { use crate::tendermint::time::Time as TmTime; use crate::tendermint_proto::Protobuf; - use super::get_dummy_header; - use namada_core::ledger::ibc::actions::{ - self, commitment_prefix, init_connection, make_create_client_event, - make_open_ack_channel_event, make_open_ack_connection_event, - make_open_confirm_channel_event, make_open_confirm_connection_event, - make_open_init_channel_event, make_open_init_connection_event, - make_open_try_channel_event, make_open_try_connection_event, - make_send_packet_event, make_update_client_event, packet_from_message, - try_connection, - }; use super::super::storage::{ ack_key, capability_key, channel_key, client_state_key, client_type_key, client_update_height_key, client_update_timestamp_key, @@ -376,16 +366,26 @@ mod tests { next_sequence_ack_key, next_sequence_recv_key, next_sequence_send_key, port_key, receipt_key, }; + use super::get_dummy_header; use super::*; - use crate::types::key::testing::keypair_1; use crate::ledger::gas::VpGasMeter; use 
crate::ledger::storage::testing::TestStorage; use crate::ledger::storage::write_log::WriteLog; use crate::proto::Tx; use crate::types::ibc::data::{PacketAck, PacketReceipt}; - use crate::vm::wasm; + use crate::types::key::testing::keypair_1; use crate::types::storage::TxIndex; use crate::types::storage::{BlockHash, BlockHeight}; + use crate::vm::wasm; + use namada_core::ledger::ibc::actions::{ + self, commitment_prefix, init_connection, make_create_client_event, + make_open_ack_channel_event, make_open_ack_connection_event, + make_open_confirm_channel_event, make_open_confirm_connection_event, + make_open_init_channel_event, make_open_init_connection_event, + make_open_try_channel_event, make_open_try_connection_event, + make_send_packet_event, make_update_client_event, packet_from_message, + try_connection, + }; const ADDRESS: Address = Address::Internal(InternalAddress::Ibc); diff --git a/shared/src/ledger/ibc/vp/port.rs b/shared/src/ledger/ibc/vp/port.rs index 94aa82405f..9b91aac957 100644 --- a/shared/src/ledger/ibc/vp/port.rs +++ b/shared/src/ledger/ibc/vp/port.rs @@ -17,7 +17,8 @@ use crate::ibc::core::ics05_port::error::Error as Ics05Error; use crate::ibc::core::ics24_host::identifier::PortId; use crate::ibc::core::ics26_routing::context::ModuleId; use crate::ledger::native_vp::VpEnv; -use crate::ledger::storage::{self as ledger_storage, StorageHasher}; +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{self as ledger_storage}; use crate::types::storage::Key; use crate::vm::WasmCacheAccess; diff --git a/shared/src/ledger/ibc/vp/token.rs b/shared/src/ledger/ibc/vp/token.rs index 927bfd412b..27c8b23f6f 100644 --- a/shared/src/ledger/ibc/vp/token.rs +++ b/shared/src/ledger/ibc/vp/token.rs @@ -12,7 +12,8 @@ use crate::ibc::core::ics04_channel::packet::Packet; use crate::ibc::core::ics26_routing::msgs::Ics26Envelope; use crate::ledger::ibc::storage as ibc_storage; use crate::ledger::native_vp::{self, Ctx, NativeVp, VpEnv}; -use crate::ledger::storage::{self as ledger_storage, StorageHasher}; +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{self as ledger_storage}; use crate::proto::SignedTxData; use crate::types::address::{ Address, DecodeError as AddressError, InternalAddress, diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index 73f39dda05..2ddf1d7cf4 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -1,5 +1,4 @@ //! The ledger modules - pub mod eth_bridge; pub mod events; pub mod ibc; diff --git a/shared/src/ledger/native_vp/ethereum_bridge/authorize.rs b/shared/src/ledger/native_vp/ethereum_bridge/authorize.rs new file mode 100644 index 0000000000..762d89d311 --- /dev/null +++ b/shared/src/ledger/native_vp/ethereum_bridge/authorize.rs @@ -0,0 +1,37 @@ +//! Functionality to do with checking whether a transaction is authorized by the +//! 
"owner" of some key under this account +use eyre::Result; +use namada_core::types::address::Address; + +use crate::ledger::native_vp::StorageReader; + +pub(super) fn is_authorized( + _reader: impl StorageReader, + _tx_data: &[u8], + _owner: &Address, +) -> Result { + tracing::warn!( + "authorize::is_authorized is not implemented, so all transfers are \ + authorized" + ); + Ok(true) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::ledger::native_vp; + use crate::types::address; + + #[test] + fn test_is_authorized_established_address() -> Result<()> { + let reader = native_vp::testing::FakeStorageReader::default(); + let tx_data = vec![]; + let owner = address::testing::established_address_1(); + + let authorized = is_authorized(reader, &tx_data, &owner)?; + + assert!(authorized); + Ok(()) + } +} diff --git a/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs b/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs new file mode 100644 index 0000000000..3f3d2d2689 --- /dev/null +++ b/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs @@ -0,0 +1,1387 @@ +//! Validity predicate for the Ethereum bridge pool +//! +//! This pool holds user initiated transfers of value from +//! Namada to Ethereum. It is to act like a mempool: users +//! add in their desired transfers and their chosen amount +//! of NAM to cover Ethereum side gas fees. These transfers +//! can be relayed in batches along with Merkle proofs. +//! +//! This VP checks that additions to the pool are handled +//! correctly. This means that the appropriate data is +//! added to the pool and gas fees are submitted appropriately +//! and that tokens to be transferred are escrowed. +use std::collections::BTreeSet; + +use borsh::BorshDeserialize; +use eyre::eyre; +use namada_core::ledger::eth_bridge::storage::bridge_pool::{ + get_pending_key, is_bridge_pool_key, BRIDGE_POOL_ADDRESS, +}; +use namada_ethereum_bridge::storage; +use namada_ethereum_bridge::storage::wrapped_erc20s; + +use crate::ledger::native_vp::ethereum_bridge::vp::check_balance_changes; +use crate::ledger::native_vp::{Ctx, NativeVp, StorageReader}; +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{DBIter, DB}; +use crate::proto::SignedTxData; +use crate::types::address::{nam, Address, InternalAddress}; +use crate::types::eth_bridge_pool::PendingTransfer; +use crate::types::ethereum_events::EthAddress; +use crate::types::storage::Key; +use crate::types::token::{balance_key, Amount}; +use crate::vm::WasmCacheAccess; + +#[derive(thiserror::Error, Debug)] +#[error(transparent)] +/// Generic error that may be returned by the validity predicate +pub struct Error(#[from] eyre::Error); + +/// A positive or negative amount +enum SignedAmount { + Positive(Amount), + Negative(Amount), +} + +/// Validity predicate for the Ethereum bridge +pub struct BridgePoolVp<'ctx, D, H, CA> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, + CA: 'static + WasmCacheAccess, +{ + /// Context to interact with the host structures. 
+ pub ctx: Ctx<'ctx, D, H, CA>, +} + +impl<'a, D, H, CA> BridgePoolVp<'a, D, H, CA> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ + /// Get the change in the balance of an account + /// associated with an address + fn account_balance_delta(&self, address: &Address) -> Option<SignedAmount> { + let account_key = balance_key(&nam(), address); + let before: Amount = (&self.ctx) + .read_pre_value(&account_key) + .unwrap_or_else(|error| { + tracing::warn!(?error, %account_key, "reading pre value"); + None + })?; + let after: Amount = (&self.ctx) + .read_post_value(&account_key) + .unwrap_or_else(|error| { + tracing::warn!(?error, %account_key, "reading post value"); + None + })?; + if before > after { + Some(SignedAmount::Negative(before - after)) + } else { + Some(SignedAmount::Positive(after - before)) + } + } + + /// Check that the correct amount of Nam was sent + /// from the correct account into escrow + fn check_nam_escrowed(&self, delta: EscrowDelta) -> Result<bool, Error> { + let EscrowDelta { + payer_account, + escrow_account, + expected_debit, + expected_credit, + } = delta; + let debited = self.account_balance_delta(payer_account); + let credited = self.account_balance_delta(escrow_account); + + match (debited, credited) { + ( + Some(SignedAmount::Negative(debit)), + Some(SignedAmount::Positive(credit)), + ) => Ok(debit == expected_debit && credit == expected_credit), + (Some(SignedAmount::Positive(_)), _) => { + tracing::debug!( + "The account {} was not debited.", + payer_account + ); + Ok(false) + } + (_, Some(SignedAmount::Negative(_))) => { + tracing::debug!( + "The Ethereum bridge pool's escrow was not credited from \ + account {}.", + payer_account + ); + Ok(false) + } + (None, _) | (_, None) => Err(Error(eyre!( + "Could not calculate the balance delta for {}", + payer_account + ))), + } + } + + /// Get the Ethereum address for wNam from storage, if possible + fn native_erc20_address(&self) -> Result<EthAddress, Error> { + match self.ctx.storage.read(&storage::native_erc20_key()) { + Ok((Some(bytes), _)) => { + Ok(EthAddress::try_from_slice(bytes.as_slice()).expect( + "Deserializing the Native ERC20 address from storage \ + shouldn't fail.", + )) + } + Ok(_) => Err(Error(eyre!( + "The Ethereum bridge storage is not initialized" + ))), + Err(e) => Err(Error(eyre!( + "Failed to read storage when fetching the native ERC20 \ + address with: {}", + e.to_string() + ))), + } + } + + /// Determine the debit and credit amounts that should be checked. + fn escrow_check<'trans>( + &self, + transfer: &'trans PendingTransfer, + ) -> Result<EscrowCheck<'trans>, Error> { + // there is a corner case where the gas fees and escrowed Nam + // are debited from the same address when minting wNam. + Ok( + if transfer.gas_fee.payer == transfer.transfer.sender + && transfer.transfer.asset == self.native_erc20_address()? + { + let debit = transfer + .gas_fee + .amount + .checked_add(&transfer.transfer.amount) + .ok_or_else(|| { + Error(eyre!( + "Addition overflowed adding gas fee + transfer \ + amount."
+ )) + })?; + EscrowCheck { + gas_check: EscrowDelta { + payer_account: &transfer.gas_fee.payer, + escrow_account: &BRIDGE_POOL_ADDRESS, + expected_debit: debit, + expected_credit: transfer.gas_fee.amount, + }, + token_check: EscrowDelta { + payer_account: &transfer.transfer.sender, + escrow_account: &Address::Internal( + InternalAddress::EthBridge, + ), + expected_debit: debit, + expected_credit: transfer.transfer.amount, + }, + } + } else { + EscrowCheck { + gas_check: EscrowDelta { + payer_account: &transfer.gas_fee.payer, + escrow_account: &BRIDGE_POOL_ADDRESS, + expected_debit: transfer.gas_fee.amount, + expected_credit: transfer.gas_fee.amount, + }, + token_check: EscrowDelta { + payer_account: &transfer.transfer.sender, + escrow_account: &Address::Internal( + InternalAddress::EthBridge, + ), + expected_debit: transfer.transfer.amount, + expected_credit: transfer.transfer.amount, + }, + } + }, + ) + } +} + +/// Check if a delta matches the delta given by a transfer +fn check_delta(delta: &(Address, Amount), transfer: &PendingTransfer) -> bool { + delta.0 == transfer.transfer.sender && delta.1 == transfer.transfer.amount +} + +/// Helper struct for handling the different escrow +/// checking scenarios. +struct EscrowDelta<'a> { + payer_account: &'a Address, + escrow_account: &'a Address, + expected_debit: Amount, + expected_credit: Amount, +} + +/// There are two checks we must do when minting wNam. +/// 1. Check that gas fees were escrowed. +/// 2. Check that the Nam to back wNam was escrowed. +struct EscrowCheck<'a> { + gas_check: EscrowDelta<'a>, + token_check: EscrowDelta<'a>, +} + +impl<'a, D, H, CA> NativeVp for BridgePoolVp<'a, D, H, CA> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ + type Error = Error; + + const ADDR: InternalAddress = InternalAddress::EthBridgePool; + + fn validate_tx( + &self, + tx_data: &[u8], + keys_changed: &BTreeSet, + _verifiers: &BTreeSet
, ) -> Result<bool, Self::Error> { + tracing::debug!( + tx_data_len = tx_data.len(), + keys_changed_len = keys_changed.len(), + verifiers_len = _verifiers.len(), + "Ethereum Bridge Pool VP triggered", + ); + let signed: SignedTxData = BorshDeserialize::try_from_slice(tx_data) + .map_err(|e| Error(e.into()))?; + + let transfer: PendingTransfer = match signed.data { + Some(data) => BorshDeserialize::try_from_slice(data.as_slice()) + .map_err(|e| Error(e.into()))?, + None => { + tracing::debug!( + "Rejecting transaction as there was no signed data" + ); + return Ok(false); + } + }; + + let pending_key = get_pending_key(&transfer); + // check that transfer is not already in the pool + match (&self.ctx).read_pre_value::<PendingTransfer>(&pending_key) { + Ok(Some(_)) => { + tracing::debug!( + "Rejecting transaction as the transfer is already in the \ + Ethereum bridge pool." + ); + return Ok(false); + } + Err(e) => { + return Err(eyre!( + "Could not read the storage key associated with the \ + transfer: {:?}", + e + ) + .into()); + } + _ => {} + } + for key in keys_changed.iter().filter(|k| is_bridge_pool_key(k)) { + if *key != pending_key { + tracing::debug!( + "Rejecting transaction as it is attempting to change an \ + incorrect key in the Ethereum bridge pool: {}.\n \ + Expected key: {}", + key, + pending_key + ); + return Ok(false); + } + } + let pending: PendingTransfer = + (&self.ctx).read_post_value(&pending_key)?.ok_or(eyre!( + "Rejecting transaction as the transfer wasn't added to the \ + pool of pending transfers" + ))?; + if pending != transfer { + tracing::debug!( + "An incorrect transfer was added to the Ethereum bridge pool: \ + {:?}.\n Expected: {:?}", + transfer, + pending + ); + return Ok(false); + } + // The deltas in the escrowed amounts we must check. + let escrow_checks = self.escrow_check(&transfer)?; + // check that gas was correctly escrowed. + if !self.check_nam_escrowed(escrow_checks.gas_check)? { + return Ok(false); + } + // if we are going to mint wNam on Ethereum, the appropriate + // amount of Nam must be escrowed in the Ethereum bridge VP's storage. + let wnam_address = self.native_erc20_address()?; + if transfer.transfer.asset == wnam_address { + // check that the correct amount of Nam was put into escrow. + return if self.check_nam_escrowed(escrow_checks.token_check)? { + tracing::info!( + "The Ethereum bridge pool VP accepted the transfer {:?}.", + transfer + ); + Ok(true) + } else { + Ok(false) + }; + } + + // check that the assets to be transferred were escrowed + let asset_key = wrapped_erc20s::Keys::from(&transfer.transfer.asset); + let owner_key = asset_key.balance(&transfer.transfer.sender); + let escrow_key = asset_key.balance(&BRIDGE_POOL_ADDRESS); + if keys_changed.contains(&owner_key) + && keys_changed.contains(&escrow_key) + { + match check_balance_changes( + &self.ctx, + (&escrow_key).try_into().expect("This should not fail"), + (&owner_key).try_into().expect("This should not fail"), + ) { + Ok(Some(delta)) if check_delta(&delta, &transfer) => {} + other => { + tracing::debug!( + "The assets of the transfer were not properly \ + escrowed into the Ethereum bridge pool: {:?}", + other + ); + return Ok(false); + } + } + } else { + tracing::debug!( + "The assets of the transfer were not properly escrowed into \ + the Ethereum bridge pool."
+ ); + return Ok(false); + } + + tracing::info!( + "The Ethereum bridge pool VP accepted the transfer {:?}.", + transfer + ); + Ok(true) + } +} + +#[cfg(test)] +mod test_bridge_pool_vp { + use std::env::temp_dir; + + use borsh::{BorshDeserialize, BorshSerialize}; + use namada_core::ledger::eth_bridge::storage::bridge_pool::get_signed_root_key; + use namada_core::types::address; + use namada_ethereum_bridge::parameters::{ + Contracts, EthereumBridgeConfig, UpgradeableContract, + }; + + use super::*; + use crate::ledger::gas::VpGasMeter; + use crate::ledger::storage::mockdb::MockDB; + use crate::ledger::storage::traits::Sha256Hasher; + use crate::ledger::storage::write_log::WriteLog; + use crate::ledger::storage::Storage; + use crate::proto::Tx; + use crate::types::address::wnam; + use crate::types::chain::ChainId; + use crate::types::eth_bridge_pool::{GasFee, TransferToEthereum}; + use crate::types::ethereum_events::EthAddress; + use crate::types::hash::Hash; + use crate::types::key::{common, ed25519, SecretKey, SigScheme}; + use crate::types::storage::TxIndex; + use crate::vm::wasm::VpCache; + use crate::vm::WasmCacheRwAccess; + + /// The amount of NAM Bertha has + const ASSET: EthAddress = EthAddress([0; 20]); + const BERTHA_WEALTH: u64 = 1_000_000; + const BERTHA_TOKENS: u64 = 10_000; + const ESCROWED_AMOUNT: u64 = 1_000; + const ESCROWED_TOKENS: u64 = 1_000; + const GAS_FEE: u64 = 100; + const TOKENS: u64 = 100; + + /// A set of balances for an address + struct Balance { + owner: Address, + balance: Amount, + token: Amount, + } + + impl Balance { + fn new(address: Address) -> Self { + Self { + owner: address, + balance: 0.into(), + token: 0.into(), + } + } + } + + /// An established user address for testing & development + fn bertha_address() -> Address { + Address::decode("atest1v4ehgw36xvcyyvejgvenxs34g3zygv3jxqunjd6rxyeyys3sxy6rwvfkx4qnj33hg9qnvse4lsfctw") + .expect("The token address decoding shouldn't fail") + } + + /// A sampled established address for tests + pub fn established_address_1() -> Address { + Address::decode("atest1v4ehgw36g56ngwpk8ppnzsf4xqeyvsf3xq6nxde5gseyys3nxgenvvfex5cnyd2rx9zrzwfctgx7sp") + .expect("The token address decoding shouldn't fail") + } + + fn bertha_keypair() -> common::SecretKey { + // generated from + // [`namada::types::key::ed25519::gen_keypair`] + let bytes = [ + 240, 3, 224, 69, 201, 148, 60, 53, 112, 79, 80, 107, 101, 127, 186, + 6, 176, 162, 113, 224, 62, 8, 183, 187, 124, 234, 244, 251, 92, 36, + 119, 243, + ]; + let ed_sk = ed25519::SecretKey::try_from_slice(&bytes).unwrap(); + ed_sk.try_to_sk().unwrap() + } + + /// The bridge pool at the beginning of all tests + fn initial_pool() -> PendingTransfer { + PendingTransfer { + transfer: TransferToEthereum { + asset: ASSET, + sender: bertha_address(), + recipient: EthAddress([0; 20]), + amount: 0.into(), + nonce: 0u64.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + } + } + + /// Create a writelog representing storage before a transfer is added to the + /// pool. 
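+ /// Concretely, the starting state the tests below assume: Bertha holds + /// `BERTHA_WEALTH` NAM and `BERTHA_TOKENS` wrapped tokens, the bridge pool + /// escrow already holds `ESCROWED_AMOUNT` NAM and `ESCROWED_TOKENS` tokens, + /// and storage contains one pending transfer (`initial_pool`) plus a zeroed + /// signed Merkle root.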
+ fn new_writelog() -> WriteLog { + let mut writelog = WriteLog::default(); + // setup the initial bridge pool storage + writelog + .write(&get_signed_root_key(), Hash([0; 32]).try_to_vec().unwrap()) + .expect("Test failed"); + let transfer = initial_pool(); + writelog + .write(&get_pending_key(&transfer), transfer.try_to_vec().unwrap()) + .expect("Test failed"); + // set up a user with a balance + update_balances( + &mut writelog, + Balance::new(bertha_address()), + SignedAmount::Positive(BERTHA_WEALTH.into()), + SignedAmount::Positive(BERTHA_TOKENS.into()), + ); + // set up the initial balances of the bridge pool + update_balances( + &mut writelog, + Balance::new(BRIDGE_POOL_ADDRESS), + SignedAmount::Positive(ESCROWED_AMOUNT.into()), + SignedAmount::Positive(ESCROWED_TOKENS.into()), + ); + writelog.commit_tx(); + writelog + } + + /// Update gas and token balances of an address and + /// return the keys changed + fn update_balances( + write_log: &mut WriteLog, + balance: Balance, + gas_delta: SignedAmount, + token_delta: SignedAmount, + ) -> BTreeSet<Key> { + // get the balance keys + let token_key = + wrapped_erc20s::Keys::from(&ASSET).balance(&balance.owner); + let account_key = balance_key(&nam(), &balance.owner); + + // update the balance of nam + let new_balance = match gas_delta { + SignedAmount::Positive(amount) => balance.balance + amount, + SignedAmount::Negative(amount) => balance.balance - amount, + } + .try_to_vec() + .expect("Test failed"); + + // update the balance of tokens + let new_token_balance = match token_delta { + SignedAmount::Positive(amount) => balance.token + amount, + SignedAmount::Negative(amount) => balance.token - amount, + } + .try_to_vec() + .expect("Test failed"); + + // write the changes to the log + write_log + .write(&account_key, new_balance) + .expect("Test failed"); + write_log + .write(&token_key, new_token_balance) + .expect("Test failed"); + + // return the keys changed + [account_key, token_key].into() + } + + /// Initialize some dummy storage for testing + fn setup_storage() -> Storage<MockDB, Sha256Hasher> { + let mut storage = Storage::<MockDB, Sha256Hasher>::open( + std::path::Path::new(""), + ChainId::default(), + address::nam(), + None, + ); + // a dummy config for testing + let config = EthereumBridgeConfig { + min_confirmations: Default::default(), + contracts: Contracts { + native_erc20: wnam(), + bridge: UpgradeableContract { + address: EthAddress([42; 20]), + version: Default::default(), + }, + governance: UpgradeableContract { + address: EthAddress([18; 20]), + version: Default::default(), + }, + }, + }; + config.init_storage(&mut storage); + storage + } + + /// Setup a ctx for running native vps + fn setup_ctx<'a>( + tx: &'a Tx, + storage: &'a Storage<MockDB, Sha256Hasher>, + write_log: &'a WriteLog, + keys_changed: &'a BTreeSet<Key>, + verifiers: &'a BTreeSet<Address>
, + ) -> Ctx<'a, MockDB, Sha256Hasher, WasmCacheRwAccess> { + Ctx::new( + &BRIDGE_POOL_ADDRESS, + storage, + write_log, + tx, + &TxIndex(0), + VpGasMeter::new(0u64), + keys_changed, + verifiers, + VpCache::new(temp_dir(), 100usize), + ) + } + + enum Expect { + True, + False, + Error, + } + + /// Helper function that tests various ways gas can be escrowed, + /// either correctly or incorrectly, is handled appropriately + fn assert_bridge_pool( + payer_gas_delta: SignedAmount, + gas_escrow_delta: SignedAmount, + payer_delta: SignedAmount, + escrow_delta: SignedAmount, + insert_transfer: F, + expect: Expect, + ) where + F: FnOnce(PendingTransfer, &mut WriteLog) -> BTreeSet, + { + // setup + let mut write_log = new_writelog(); + let storage = setup_storage(); + let tx = Tx::new(vec![], None); + + // the transfer to be added to the pool + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: ASSET, + sender: bertha_address(), + recipient: EthAddress([1; 20]), + amount: TOKENS.into(), + nonce: 1u64.into(), + }, + gas_fee: GasFee { + amount: GAS_FEE.into(), + payer: bertha_address(), + }, + }; + // add transfer to pool + let mut keys_changed = + insert_transfer(transfer.clone(), &mut write_log); + + // change Bertha's balances + let mut new_keys_changed = update_balances( + &mut write_log, + Balance { + owner: bertha_address(), + balance: BERTHA_WEALTH.into(), + token: BERTHA_TOKENS.into(), + }, + payer_gas_delta, + payer_delta, + ); + keys_changed.append(&mut new_keys_changed); + + // change the bridge pool balances + let mut new_keys_changed = update_balances( + &mut write_log, + Balance { + owner: BRIDGE_POOL_ADDRESS, + balance: ESCROWED_AMOUNT.into(), + token: ESCROWED_TOKENS.into(), + }, + gas_escrow_delta, + escrow_delta, + ); + keys_changed.append(&mut new_keys_changed); + let verifiers = BTreeSet::default(); + // create the data to be given to the vp + let vp = BridgePoolVp { + ctx: setup_ctx( + &tx, + &storage, + &write_log, + &keys_changed, + &verifiers, + ), + }; + + let to_sign = transfer.try_to_vec().expect("Test failed"); + let sig = common::SigScheme::sign(&bertha_keypair(), &to_sign); + let signed = SignedTxData { + data: Some(to_sign), + sig, + } + .try_to_vec() + .expect("Test failed"); + + let res = vp.validate_tx(&signed, &keys_changed, &verifiers); + match expect { + Expect::True => assert!(res.expect("Test failed")), + Expect::False => assert!(!res.expect("Test failed")), + Expect::Error => assert!(res.is_err()), + } + } + + /// Test adding a transfer to the pool and escrowing gas passes vp + #[test] + fn test_happy_flow() { + assert_bridge_pool( + SignedAmount::Negative(GAS_FEE.into()), + SignedAmount::Positive(GAS_FEE.into()), + SignedAmount::Negative(TOKENS.into()), + SignedAmount::Positive(TOKENS.into()), + |transfer, log| { + log.write( + &get_pending_key(&transfer), + transfer.try_to_vec().unwrap(), + ) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }, + Expect::True, + ); + } + + /// Test that if the balance for the gas payer + /// was not correctly adjusted, reject + #[test] + fn test_incorrect_gas_withdrawn() { + assert_bridge_pool( + SignedAmount::Negative(10.into()), + SignedAmount::Positive(GAS_FEE.into()), + SignedAmount::Negative(TOKENS.into()), + SignedAmount::Positive(TOKENS.into()), + |transfer, log| { + log.write( + &get_pending_key(&transfer), + transfer.try_to_vec().unwrap(), + ) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }, + Expect::False, + ); + } + + /// Test that if the gas payer's balance + /// 
does not decrease, we reject the tx + #[test] + fn test_payer_balance_must_decrease() { + assert_bridge_pool( + SignedAmount::Positive(GAS_FEE.into()), + SignedAmount::Positive(GAS_FEE.into()), + SignedAmount::Negative(TOKENS.into()), + SignedAmount::Positive(TOKENS.into()), + |transfer, log| { + log.write( + &get_pending_key(&transfer), + transfer.try_to_vec().unwrap(), + ) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }, + Expect::False, + ); + } + + /// Test that if the gas amount escrowed is incorrect, + /// the tx is rejected + #[test] + fn test_incorrect_gas_deposited() { + assert_bridge_pool( + SignedAmount::Negative(GAS_FEE.into()), + SignedAmount::Positive(10.into()), + SignedAmount::Negative(TOKENS.into()), + SignedAmount::Positive(TOKENS.into()), + |transfer, log| { + log.write( + &get_pending_key(&transfer), + transfer.try_to_vec().unwrap(), + ) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }, + Expect::False, + ); + } + + /// Test that if the number of tokens debited + /// from one account does not equal the amount + /// credited the other, the tx is rejected + #[test] + fn test_incorrect_token_deltas() { + assert_bridge_pool( + SignedAmount::Negative(GAS_FEE.into()), + SignedAmount::Positive(GAS_FEE.into()), + SignedAmount::Negative(TOKENS.into()), + SignedAmount::Positive(10.into()), + |transfer, log| { + log.write( + &get_pending_key(&transfer), + transfer.try_to_vec().unwrap(), + ) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }, + Expect::False, + ); + } + + /// Test that if the number of tokens transferred + /// is incorrect, the tx is rejected + #[test] + fn test_incorrect_tokens_escrowed() { + assert_bridge_pool( + SignedAmount::Negative(GAS_FEE.into()), + SignedAmount::Positive(GAS_FEE.into()), + SignedAmount::Negative(10.into()), + SignedAmount::Positive(10.into()), + |transfer, log| { + log.write( + &get_pending_key(&transfer), + transfer.try_to_vec().unwrap(), + ) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }, + Expect::False, + ); + } + + /// Test that the amount of gas escrowed increases, + /// otherwise the tx is rejected. + #[test] + fn test_escrowed_gas_must_increase() { + assert_bridge_pool( + SignedAmount::Negative(GAS_FEE.into()), + SignedAmount::Negative(GAS_FEE.into()), + SignedAmount::Negative(TOKENS.into()), + SignedAmount::Positive(TOKENS.into()), + |transfer, log| { + log.write( + &get_pending_key(&transfer), + transfer.try_to_vec().unwrap(), + ) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }, + Expect::False, + ); + } + + /// Test that the amount of tokens escrowed in the + /// bridge pool is positive. 
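+ /// Here the two token deltas still cancel out, but point the wrong way: the + /// sender is credited `TOKENS` while the pool is debited `TOKENS`, so the + /// escrowed amount shrinks and the VP must reject.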
+ #[test] + fn test_escrowed_tokens_must_increase() { + assert_bridge_pool( + SignedAmount::Negative(GAS_FEE.into()), + SignedAmount::Positive(GAS_FEE.into()), + SignedAmount::Positive(TOKENS.into()), + SignedAmount::Negative(TOKENS.into()), + |transfer, log| { + log.write( + &get_pending_key(&transfer), + transfer.try_to_vec().unwrap(), + ) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }, + Expect::False, + ); + } + + /// Test that if the transfer was not added to the + /// pool, the vp rejects + #[test] + fn test_not_adding_transfer_rejected() { + assert_bridge_pool( + SignedAmount::Negative(GAS_FEE.into()), + SignedAmount::Positive(GAS_FEE.into()), + SignedAmount::Negative(TOKENS.into()), + SignedAmount::Positive(TOKENS.into()), + |transfer, _| BTreeSet::from([get_pending_key(&transfer)]), + Expect::Error, + ); + } + + /// Test that if the wrong transaction was added + /// to the pool, it is rejected. + #[test] + fn test_add_wrong_transfer() { + assert_bridge_pool( + SignedAmount::Negative(GAS_FEE.into()), + SignedAmount::Positive(GAS_FEE.into()), + SignedAmount::Negative(TOKENS.into()), + SignedAmount::Positive(TOKENS.into()), + |transfer, log| { + let t = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([0; 20]), + sender: bertha_address(), + recipient: EthAddress([1; 20]), + amount: 100.into(), + nonce: 10u64.into(), + }, + gas_fee: GasFee { + amount: GAS_FEE.into(), + payer: bertha_address(), + }, + }; + log.write(&get_pending_key(&transfer), t.try_to_vec().unwrap()) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }, + Expect::False, + ); + } + + /// Test that if the wrong transaction was added + /// to the pool, it is rejected. + #[test] + fn test_add_wrong_key() { + assert_bridge_pool( + SignedAmount::Negative(GAS_FEE.into()), + SignedAmount::Positive(GAS_FEE.into()), + SignedAmount::Negative(TOKENS.into()), + SignedAmount::Positive(TOKENS.into()), + |transfer, log| { + let t = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([0; 20]), + sender: bertha_address(), + recipient: EthAddress([1; 20]), + amount: 100.into(), + nonce: 10u64.into(), + }, + gas_fee: GasFee { + amount: GAS_FEE.into(), + payer: bertha_address(), + }, + }; + log.write(&get_pending_key(&t), transfer.try_to_vec().unwrap()) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }, + Expect::Error, + ); + } + + /// Test that no tx may alter the storage containing + /// the signed merkle root. + #[test] + fn test_signed_merkle_root_changes_rejected() { + assert_bridge_pool( + SignedAmount::Negative(GAS_FEE.into()), + SignedAmount::Positive(GAS_FEE.into()), + SignedAmount::Negative(TOKENS.into()), + SignedAmount::Positive(TOKENS.into()), + |transfer, log| { + log.write( + &get_pending_key(&transfer), + transfer.try_to_vec().unwrap(), + ) + .unwrap(); + BTreeSet::from([ + get_pending_key(&transfer), + get_signed_root_key(), + ]) + }, + Expect::False, + ); + } + + /// Test that adding a transfer to the pool + /// that is already in the pool fails. 
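+ /// The transfer written here is `initial_pool()`, which `new_writelog` has + /// already committed, so the pre-state check in `validate_tx` finds it and + /// rejects.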
+ #[test] + fn test_adding_transfer_twice_fails() { + // setup + let mut write_log = new_writelog(); + let storage = setup_storage(); + let tx = Tx::new(vec![], None); + + // the transfer to be added to the pool + let transfer = initial_pool(); + + // add transfer to pool + let mut keys_changed = { + write_log + .write( + &get_pending_key(&transfer), + transfer.try_to_vec().unwrap(), + ) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }; + + // update Bertha's balances + let mut new_keys_changed = update_balances( + &mut write_log, + Balance { + owner: bertha_address(), + balance: BERTHA_WEALTH.into(), + token: BERTHA_TOKENS.into(), + }, + SignedAmount::Negative(GAS_FEE.into()), + SignedAmount::Negative(TOKENS.into()), + ); + keys_changed.append(&mut new_keys_changed); + + // update the bridge pool balances + let mut new_keys_changed = update_balances( + &mut write_log, + Balance { + owner: BRIDGE_POOL_ADDRESS, + balance: ESCROWED_AMOUNT.into(), + token: ESCROWED_TOKENS.into(), + }, + SignedAmount::Positive(GAS_FEE.into()), + SignedAmount::Positive(TOKENS.into()), + ); + keys_changed.append(&mut new_keys_changed); + let verifiers = BTreeSet::default(); + + // create the data to be given to the vp + let vp = BridgePoolVp { + ctx: setup_ctx( + &tx, + &storage, + &write_log, + &keys_changed, + &verifiers, + ), + }; + + let to_sign = transfer.try_to_vec().expect("Test failed"); + let sig = common::SigScheme::sign(&bertha_keypair(), &to_sign); + let signed = SignedTxData { + data: Some(to_sign), + sig, + } + .try_to_vec() + .expect("Test failed"); + + let res = vp.validate_tx(&signed, &keys_changed, &verifiers); + assert!(!res.expect("Test failed")); + } + + /// Test that a transfer added to the pool with zero gas fees + /// is rejected. + #[test] + fn test_zero_gas_fees_rejected() { + // setup + let mut write_log = new_writelog(); + let storage = setup_storage(); + let tx = Tx::new(vec![], None); + + // the transfer to be added to the pool + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: ASSET, + sender: bertha_address(), + recipient: EthAddress([1; 20]), + amount: 0.into(), + nonce: 1u64.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + }; + + // add transfer to pool + let mut keys_changed = { + write_log + .write( + &get_pending_key(&transfer), + transfer.try_to_vec().unwrap(), + ) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }; + // We escrow 0 tokens + keys_changed.insert( + wrapped_erc20s::Keys::from(&ASSET).balance(&bertha_address()), + ); + keys_changed.insert( + wrapped_erc20s::Keys::from(&ASSET).balance(&BRIDGE_POOL_ADDRESS), + ); + + let verifiers = BTreeSet::default(); + // create the data to be given to the vp + let vp = BridgePoolVp { + ctx: setup_ctx( + &tx, + &storage, + &write_log, + &keys_changed, + &verifiers, + ), + }; + + let to_sign = transfer.try_to_vec().expect("Test failed"); + let sig = common::SigScheme::sign(&bertha_keypair(), &to_sign); + let signed = SignedTxData { + data: Some(to_sign), + sig, + } + .try_to_vec() + .expect("Test failed"); + + let res = vp + .validate_tx(&signed, &keys_changed, &verifiers) + .expect("Test failed"); + assert!(!res); + } + + /// Test that we can escrow Nam if we + /// want to mint wNam on Ethereum. 
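+ /// Since the sender also pays the gas fee and the asset is the native ERC20, + /// `escrow_check` expects one combined debit. A sketch of the expected + /// arithmetic with this test's values (100 NAM transferred, 100 NAM gas fee), + /// where `escrow_checks` is the local binding in `validate_tx`: + /// ```ignore + /// // combined debit from the sender, as computed in escrow_check() + /// let debit = transfer + /// .gas_fee + /// .amount + /// .checked_add(&transfer.transfer.amount) + /// .unwrap(); + /// assert_eq!(debit, Amount::from(200)); + /// // the gas fee is credited to the bridge pool, the backing NAM to the bridge + /// assert_eq!(escrow_checks.gas_check.expected_credit, Amount::from(100)); + /// assert_eq!(escrow_checks.token_check.expected_credit, Amount::from(100)); + /// ```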
+ #[test] + fn test_mint_wnam() { + // setup + let mut write_log = new_writelog(); + let eb_account_key = + balance_key(&nam(), &Address::Internal(InternalAddress::EthBridge)); + write_log.commit_tx(); + let storage = setup_storage(); + let tx = Tx::new(vec![], None); + + // the transfer to be added to the pool + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: wnam(), + sender: bertha_address(), + recipient: EthAddress([1; 20]), + amount: 100.into(), + nonce: 1u64.into(), + }, + gas_fee: GasFee { + amount: 100.into(), + payer: bertha_address(), + }, + }; + + // add transfer to pool + let keys_changed = { + write_log + .write( + &get_pending_key(&transfer), + transfer.try_to_vec().unwrap(), + ) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }; + // We escrow 100 Nam into the bridge pool VP + // and 100 Nam in the Eth bridge VP + let account_key = balance_key(&nam(), &bertha_address()); + write_log + .write( + &account_key, + Amount::from(BERTHA_WEALTH - 200) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + let bp_account_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); + write_log + .write( + &bp_account_key, + Amount::from(ESCROWED_AMOUNT + 100) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + write_log + .write( + &eb_account_key, + Amount::from(100).try_to_vec().expect("Test failed"), + ) + .expect("Test failed"); + + let verifiers = BTreeSet::default(); + // create the data to be given to the vp + let vp = BridgePoolVp { + ctx: setup_ctx( + &tx, + &storage, + &write_log, + &keys_changed, + &verifiers, + ), + }; + + let to_sign = transfer.try_to_vec().expect("Test failed"); + let sig = common::SigScheme::sign(&bertha_keypair(), &to_sign); + let signed = SignedTxData { + data: Some(to_sign), + sig, + } + .try_to_vec() + .expect("Test failed"); + + let res = vp + .validate_tx(&signed, &keys_changed, &verifiers) + .expect("Test failed"); + assert!(res); + } + + /// Test that we can reject a transfer that + /// mints wNam if we don't escrow the correct + /// amount of Nam. 
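+ /// Identical to the happy mint-wNAM flow above, except only 10 NAM is + /// written to the Ethereum bridge account instead of the expected 100, so + /// the NAM-backing escrow check fails and the VP rejects.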
+ #[test] + fn test_reject_mint_wnam() { + // setup + let mut write_log = new_writelog(); + write_log.commit_tx(); + let storage = setup_storage(); + let tx = Tx::new(vec![], None); + let eb_account_key = + balance_key(&nam(), &Address::Internal(InternalAddress::EthBridge)); + + // the transfer to be added to the pool + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: wnam(), + sender: bertha_address(), + recipient: EthAddress([1; 20]), + amount: 100.into(), + nonce: 1u64.into(), + }, + gas_fee: GasFee { + amount: 100.into(), + payer: bertha_address(), + }, + }; + + // add transfer to pool + let keys_changed = { + write_log + .write( + &get_pending_key(&transfer), + transfer.try_to_vec().unwrap(), + ) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }; + // We escrow 100 Nam into the bridge pool VP + // and 100 Nam in the Eth bridge VP + let account_key = balance_key(&nam(), &bertha_address()); + write_log + .write( + &account_key, + Amount::from(BERTHA_WEALTH - 200) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + let bp_account_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); + write_log + .write( + &bp_account_key, + Amount::from(ESCROWED_AMOUNT + 100) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + write_log + .write( + &eb_account_key, + Amount::from(10).try_to_vec().expect("Test failed"), + ) + .expect("Test failed"); + let verifiers = BTreeSet::default(); + + // create the data to be given to the vp + let vp = BridgePoolVp { + ctx: setup_ctx( + &tx, + &storage, + &write_log, + &keys_changed, + &verifiers, + ), + }; + + let to_sign = transfer.try_to_vec().expect("Test failed"); + let sig = common::SigScheme::sign(&bertha_keypair(), &to_sign); + let signed = SignedTxData { + data: Some(to_sign), + sig, + } + .try_to_vec() + .expect("Test failed"); + + let res = vp + .validate_tx(&signed, &keys_changed, &verifiers) + .expect("Test failed"); + assert!(!res); + } + + /// Test that we check escrowing Nam correctly when minting wNam + /// and the gas payer account is different from the transferring + /// account. 
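+ /// With distinct payer and sender, the combined-debit corner case in + /// `escrow_check` does not apply: each account must be debited 100 NAM + /// separately, and the 10 NAM actually written to the bridge account + /// (instead of 100) makes the VP reject.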
+ #[test] + fn test_mint_wnam_separate_gas_payer() { + // setup + let mut write_log = new_writelog(); + // initialize the eth bridge balance to 0 + let eb_account_key = + balance_key(&nam(), &Address::Internal(InternalAddress::EthBridge)); + write_log + .write( + &eb_account_key, + Amount::default().try_to_vec().expect("Test failed"), + ) + .expect("Test failed"); + // initialize the gas payers account + let gas_payer_balance_key = + balance_key(&nam(), &established_address_1()); + write_log + .write( + &gas_payer_balance_key, + Amount::from(BERTHA_WEALTH) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + write_log.commit_tx(); + let storage = setup_storage(); + let tx = Tx::new(vec![], None); + + // the transfer to be added to the pool + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: wnam(), + sender: bertha_address(), + recipient: EthAddress([1; 20]), + amount: 100.into(), + nonce: 1u64.into(), + }, + gas_fee: GasFee { + amount: 100.into(), + payer: established_address_1(), + }, + }; + + // add transfer to pool + let keys_changed = { + write_log + .write( + &get_pending_key(&transfer), + transfer.try_to_vec().unwrap(), + ) + .unwrap(); + BTreeSet::from([get_pending_key(&transfer)]) + }; + // We escrow 100 Nam into the bridge pool VP + // and 100 Nam in the Eth bridge VP + let account_key = balance_key(&nam(), &bertha_address()); + write_log + .write( + &account_key, + Amount::from(BERTHA_WEALTH - 100) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + write_log + .write( + &gas_payer_balance_key, + Amount::from(BERTHA_WEALTH - 100) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + let bp_account_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); + write_log + .write( + &bp_account_key, + Amount::from(ESCROWED_AMOUNT + 100) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + write_log + .write( + &eb_account_key, + Amount::from(10).try_to_vec().expect("Test failed"), + ) + .expect("Test failed"); + let verifiers = BTreeSet::default(); + // create the data to be given to the vp + let vp = BridgePoolVp { + ctx: setup_ctx( + &tx, + &storage, + &write_log, + &keys_changed, + &verifiers, + ), + }; + + let to_sign = transfer.try_to_vec().expect("Test failed"); + let sig = common::SigScheme::sign(&bertha_keypair(), &to_sign); + let signed = SignedTxData { + data: Some(to_sign), + sig, + } + .try_to_vec() + .expect("Test failed"); + + let res = vp + .validate_tx(&signed, &keys_changed, &verifiers) + .expect("Test failed"); + assert!(!res); + } +} diff --git a/shared/src/ledger/native_vp/ethereum_bridge/mod.rs b/shared/src/ledger/native_vp/ethereum_bridge/mod.rs new file mode 100644 index 0000000000..7e5062a251 --- /dev/null +++ b/shared/src/ledger/native_vp/ethereum_bridge/mod.rs @@ -0,0 +1,7 @@ +//! Native validity predicates for the Namada Ethereum bridge. +//! This includes both the bridge vp and the vp for the bridge +//! pool. + +mod authorize; +pub mod bridge_pool_vp; +pub mod vp; diff --git a/shared/src/ledger/native_vp/ethereum_bridge/vp.rs b/shared/src/ledger/native_vp/ethereum_bridge/vp.rs new file mode 100644 index 0000000000..e8cb3b7158 --- /dev/null +++ b/shared/src/ledger/native_vp/ethereum_bridge/vp.rs @@ -0,0 +1,757 @@ +//! 
Validity predicate for the Ethereum bridge +use std::collections::{BTreeSet, HashSet}; + +use borsh::BorshDeserialize; +use eyre::{eyre, Result}; +use itertools::Itertools; +use namada_core::ledger::eth_bridge::storage::{ + self, escrow_key, wrapped_erc20s, +}; +use namada_core::ledger::storage::traits::StorageHasher; +use namada_core::ledger::{eth_bridge, storage as ledger_storage}; +use namada_core::types::address::{nam, Address, InternalAddress}; +use namada_core::types::storage::Key; +use namada_core::types::token::{balance_key, Amount}; + +use crate::ledger::native_vp::ethereum_bridge::authorize; +use crate::ledger::native_vp::{Ctx, NativeVp, StorageReader, VpEnv}; +use crate::vm::WasmCacheAccess; + +/// Validity predicate for the Ethereum bridge +pub struct EthBridge<'ctx, DB, H, CA> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: StorageHasher, + CA: 'static + WasmCacheAccess, +{ + /// Context to interact with the host structures. + pub ctx: Ctx<'ctx, DB, H, CA>, +} + +impl<'ctx, DB, H, CA> EthBridge<'ctx, DB, H, CA> +where + DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ + /// If the bridge's escrow key was changed, we check + /// that the balance increased and that the bridge pool + /// VP has been triggered. The bridge pool VP will carry + /// out the rest of the checks. + fn check_escrow( + &self, + verifiers: &BTreeSet
, ) -> Result<bool, Error> { + let escrow_key = balance_key(&nam(), &eth_bridge::ADDRESS); + let escrow_pre: Amount = if let Ok(Some(bytes)) = + self.ctx.read_bytes_pre(&escrow_key) + { + BorshDeserialize::try_from_slice(bytes.as_slice()).map_err( + |_| Error(eyre!("Couldn't deserialize a balance from storage")), + )? + } else { + tracing::debug!( + "Could not retrieve the Ethereum bridge VP's balance from \ + storage" + ); + return Ok(false); + }; + let escrow_post: Amount = + if let Ok(Some(bytes)) = self.ctx.read_bytes_post(&escrow_key) { + BorshDeserialize::try_from_slice(bytes.as_slice()).map_err( + |_| { + Error(eyre!( + "Couldn't deserialize the balance of the Ethereum \ + bridge VP from storage." + )) + }, + )? + } else { + tracing::debug!( + "Could not retrieve the modified Ethereum bridge VP's \ + balance after applying tx" + ); + return Ok(false); + }; + + // The amount escrowed should increase. + if escrow_pre < escrow_post { + Ok(verifiers.contains(&storage::bridge_pool::BRIDGE_POOL_ADDRESS)) + } else { + tracing::info!( + "A normal tx cannot decrease the amount of Nam escrowed in \ + the Ethereum bridge" + ); + Ok(false) + } + } +} + +/// One of the two types of checks +/// this VP must perform. +#[derive(Debug)] +enum CheckType { + Escrow, + Erc20Transfer(wrapped_erc20s::Key, wrapped_erc20s::Key), +} + +#[derive(thiserror::Error, Debug)] +#[error(transparent)] +/// Generic error that may be returned by the validity predicate +pub struct Error(#[from] eyre::Error); + +impl<'a, DB, H, CA> NativeVp for EthBridge<'a, DB, H, CA> +where + DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ + type Error = Error; + + const ADDR: InternalAddress = eth_bridge::INTERNAL_ADDRESS; + + /// Validate that a wasm transaction is permitted to change keys under this + /// account. + /// + /// We permit only the following changes via wasm for the time being: + /// - a wrapped ERC20's supply key to decrease iff one of its balance keys + /// decreased by the same amount + /// - a wrapped ERC20's balance key to decrease iff another one of its + /// balance keys increased by the same amount + /// - Escrowing Nam in order to mint wrapped Nam on Ethereum + /// + /// Some other changes to the storage subspace of this account are expected + /// to happen natively, i.e. bypassing this validity predicate. For example, + /// changes to the `eth_msgs/...` keys. For those cases, we reject here as + /// no wasm transactions should be able to modify those keys. + fn validate_tx( + &self, + tx_data: &[u8], + keys_changed: &BTreeSet<Key>, + verifiers: &BTreeSet<Address>
, ) -> Result<bool, Self::Error> { + tracing::debug!( + tx_data_len = tx_data.len(), + keys_changed_len = keys_changed.len(), + verifiers_len = verifiers.len(), + "Ethereum Bridge VP triggered", + ); + + let (key_a, key_b) = match determine_check_type(keys_changed)? { + Some(CheckType::Erc20Transfer(key_a, key_b)) => (key_a, key_b), + Some(CheckType::Escrow) => return self.check_escrow(verifiers), + None => return Ok(false), + }; + let (sender, _) = match check_balance_changes(&self.ctx, key_a, key_b)? + { + Some(sender) => sender, + None => return Ok(false), + }; + let authed = authorize::is_authorized(&self.ctx, tx_data, &sender)?; + Ok(authed) + } +} + +/// Checks if `keys_changed` represents a valid set of changed keys. +/// Depending on which keys get changed, chooses which type of +/// check to perform in the `validate_tx` function. +/// 1. If the Ethereum bridge escrow key was changed, we need to check +/// that escrow was performed correctly. +/// 2. If two erc20 keys were changed, this is a transfer that needs +/// to be checked. +fn determine_check_type( + keys_changed: &BTreeSet<Key>, +) -> Result<Option<CheckType>, Error> { + // we aren't concerned with keys that changed outside of our account + let keys_changed: HashSet<_> = keys_changed + .iter() + .filter(|key| storage::is_eth_bridge_key(key)) + .collect(); + if keys_changed.is_empty() { + return Err(Error(eyre!( + "No keys changed under our account so this validity predicate \ + shouldn't have been triggered" + ))); + } + tracing::debug!( + relevant_keys.len = keys_changed.len(), + "Found keys changed under our account" + ); + if keys_changed.len() == 1 && keys_changed.contains(&escrow_key()) { + return Ok(Some(CheckType::Escrow)); + } else if keys_changed.len() != 2 { + tracing::debug!( + relevant_keys.len = keys_changed.len(), + "Rejecting transaction as only two keys should have changed" + ); + return Ok(None); + } + + let mut keys = HashSet::<_>::default(); + for key in keys_changed.into_iter() { + let key = match wrapped_erc20s::Key::try_from(key) { + Ok(key) => { + // Until burning is implemented, we disallow changes to any + // supply keys via wasm transactions + if matches!(key.suffix, wrapped_erc20s::KeyType::Supply) { + tracing::debug!( + ?key, + "Rejecting transaction as key is a supply key" + ); + return Ok(None); + } + key + } + Err(error) => { + tracing::debug!( + %key, + ?error, + "Rejecting transaction as key is not a wrapped ERC20 key" + ); + return Ok(None); + } + }; + keys.insert(key); + } + + // We can .unwrap() here as we know for sure that this set has len=2 + let (key_a, key_b) = keys.into_iter().collect_tuple().unwrap(); + if key_a.asset != key_b.asset { + tracing::debug!( + ?key_a, + ?key_b, + "Rejecting transaction as keys are for different assets" + ); + return Ok(None); + } + Ok(Some(CheckType::Erc20Transfer(key_a, key_b))) +} + +/// Checks that the balances at both `key_a` and `key_b` have changed by some +/// amount, and that the changes balance each other out. If the balance changes +/// are invalid, the reason is logged and a `None` is returned. Otherwise, +/// return the `Address` of the owner of the balance which is decreasing, +/// and by how much it decreased, which should be authorizing the balance +/// change. +pub(super) fn check_balance_changes( + reader: impl StorageReader, + key_a: wrapped_erc20s::Key, + key_b: wrapped_erc20s::Key, +) -> Result<Option<(Address, Amount)>> { + let (balance_a, balance_b) = + match (key_a.suffix.clone(), key_b.suffix.clone()) { + ( + wrapped_erc20s::KeyType::Balance { .. }, + wrapped_erc20s::KeyType::Balance { .. 
}, + ) => (Key::from(&key_a), Key::from(&key_b)), + ( + wrapped_erc20s::KeyType::Balance { .. }, + wrapped_erc20s::KeyType::Supply, + ) + | ( + wrapped_erc20s::KeyType::Supply, + wrapped_erc20s::KeyType::Balance { .. }, + ) => { + tracing::debug!( + ?key_a, + ?key_b, + "Rejecting transaction that is attempting to change a \ + supply key" + ); + return Ok(None); + } + ( + wrapped_erc20s::KeyType::Supply, + wrapped_erc20s::KeyType::Supply, + ) => { + // in theory, this should be unreachable!() as we would have + // already rejected if both supply keys were for + // the same asset + tracing::debug!( + ?key_a, + ?key_b, + "Rejecting transaction that is attempting to change two \ + supply keys" + ); + return Ok(None); + } + }; + let balance_a_pre = reader + .read_pre_value::(&balance_a)? + .unwrap_or_default() + .change(); + let balance_a_post = match reader.read_post_value::(&balance_a)? { + Some(value) => value, + None => { + tracing::debug!( + ?balance_a, + "Rejecting transaction as could not read_post balance key" + ); + return Ok(None); + } + } + .change(); + let balance_b_pre = reader + .read_pre_value::(&balance_b)? + .unwrap_or_default() + .change(); + let balance_b_post = match reader.read_post_value::(&balance_b)? { + Some(value) => value, + None => { + tracing::debug!( + ?balance_b, + "Rejecting transaction as could not read_post balance key" + ); + return Ok(None); + } + } + .change(); + + let balance_a_delta = calculate_delta(balance_a_pre, balance_a_post)?; + let balance_b_delta = calculate_delta(balance_b_pre, balance_b_post)?; + if balance_a_delta != -balance_b_delta { + tracing::debug!( + ?balance_a_pre, + ?balance_b_pre, + ?balance_a_post, + ?balance_b_post, + ?balance_a_delta, + ?balance_b_delta, + "Rejecting transaction as balance changes do not match" + ); + return Ok(None); + } + if balance_a_delta == 0 { + assert_eq!(balance_b_delta, 0); + tracing::debug!("Rejecting transaction as no balance change"); + return Ok(None); + } + if balance_a_post < 0 { + tracing::debug!( + ?balance_a_post, + "Rejecting transaction as balance is negative" + ); + return Ok(None); + } + if balance_b_post < 0 { + tracing::debug!( + ?balance_b_post, + "Rejecting transaction as balance is negative" + ); + return Ok(None); + } + + if balance_a_delta < 0 { + if let wrapped_erc20s::KeyType::Balance { owner } = key_a.suffix { + Ok(Some(( + owner, + Amount::from( + u64::try_from(balance_b_delta) + .expect("This should not fail"), + ), + ))) + } else { + unreachable!() + } + } else { + assert!(balance_b_delta < 0); + if let wrapped_erc20s::KeyType::Balance { owner } = key_b.suffix { + Ok(Some(( + owner, + Amount::from( + u64::try_from(balance_a_delta) + .expect("This should not fail"), + ), + ))) + } else { + unreachable!() + } + } +} + +/// Return the delta between `balance_pre` and `balance_post`, erroring if there +/// is an underflow +fn calculate_delta(balance_pre: i128, balance_post: i128) -> Result { + match balance_post.checked_sub(balance_pre) { + Some(result) => Ok(result), + None => Err(eyre!( + "Underflow while calculating delta: {} - {}", + balance_post, + balance_pre + )), + } +} + +#[cfg(test)] +mod tests { + use std::default::Default; + use std::env::temp_dir; + + use borsh::BorshSerialize; + use namada_core::ledger::eth_bridge; + use namada_core::ledger::eth_bridge::storage::bridge_pool::BRIDGE_POOL_ADDRESS; + use namada_core::types::address; + use namada_ethereum_bridge::parameters::{ + Contracts, EthereumBridgeConfig, UpgradeableContract, + }; + use rand::Rng; + + use super::*; + use 
crate::ledger::gas::VpGasMeter; + use crate::ledger::storage::mockdb::MockDB; + use crate::ledger::storage::traits::Sha256Hasher; + use crate::ledger::storage::write_log::WriteLog; + use crate::ledger::storage::Storage; + use crate::proto::Tx; + use crate::types::address::wnam; + use crate::types::chain::ChainId; + use crate::types::ethereum_events; + use crate::types::ethereum_events::EthAddress; + use crate::types::storage::TxIndex; + use crate::vm::wasm::VpCache; + use crate::vm::WasmCacheRwAccess; + + const ARBITRARY_OWNER_A_ADDRESS: &str = + "atest1d9khqw36x9zyxwfhgfpygv2pgc65gse4gy6rjs34gfzr2v69gy6y23zpggurjv2yx5m52sesu6r4y4"; + const ARBITRARY_OWNER_B_ADDRESS: &str = + "atest1v4ehgw36xuunwd6989prwdfkxqmnvsfjxs6nvv6xxucrs3f3xcmns3fcxdzrvvz9xverzvzr56le8f"; + const ARBITRARY_OWNER_A_INITIAL_BALANCE: u64 = 100; + const ESCROW_AMOUNT: u64 = 100; + const BRIDGE_POOL_ESCROW_INITIAL_BALANCE: u64 = 0; + + /// Return some arbitrary random key belonging to this account + fn arbitrary_key() -> Key { + let mut rng = rand::thread_rng(); + let rn = rng.gen::<u64>(); + storage::prefix() + .push(&format!("arbitrary key segment {}", rn)) + .expect("should always be able to construct this key") + } + + /// Initialize some dummy storage for testing + fn setup_storage() -> Storage<MockDB, Sha256Hasher> { + let mut storage = Storage::<MockDB, Sha256Hasher>::open( + std::path::Path::new(""), + ChainId::default(), + address::nam(), + None, + ); + + // setup a user with a balance + let balance_key = balance_key( + &nam(), + &Address::decode(ARBITRARY_OWNER_A_ADDRESS).expect("Test failed"), + ); + storage + .write( + &balance_key, + Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + + // a dummy config for testing + let config = EthereumBridgeConfig { + min_confirmations: Default::default(), + contracts: Contracts { + native_erc20: wnam(), + bridge: UpgradeableContract { + address: EthAddress([42; 20]), + version: Default::default(), + }, + governance: UpgradeableContract { + address: EthAddress([18; 20]), + version: Default::default(), + }, + }, + }; + config.init_storage(&mut storage); + storage + } + + /// Setup a ctx for running native vps + fn setup_ctx<'a>( + tx: &'a Tx, + storage: &'a Storage<MockDB, Sha256Hasher>, + write_log: &'a WriteLog, + keys_changed: &'a BTreeSet<Key>, + verifiers: &'a BTreeSet<Address>
, + ) -> Ctx<'a, MockDB, Sha256Hasher, WasmCacheRwAccess> { + Ctx::new( + &eth_bridge::ADDRESS, + storage, + write_log, + tx, + &TxIndex(0), + VpGasMeter::new(0u64), + keys_changed, + verifiers, + VpCache::new(temp_dir(), 100usize), + ) + } + + #[test] + fn test_error_if_triggered_without_keys_changed() { + let keys_changed = BTreeSet::new(); + + let result = determine_check_type(&keys_changed); + + assert!(result.is_err()); + } + + #[test] + fn test_rejects_if_not_two_keys_changed() { + { + let keys_changed = BTreeSet::from_iter(vec![arbitrary_key(); 3]); + + let result = determine_check_type(&keys_changed); + + assert_matches!(result, Ok(None)); + } + { + let keys_changed = BTreeSet::from_iter(vec![ + escrow_key(), + arbitrary_key(), + arbitrary_key(), + ]); + + let result = determine_check_type(&keys_changed); + + assert_matches!(result, Ok(None)); + } + } + + #[test] + fn test_rejects_if_not_two_multitoken_keys_changed() { + { + let keys_changed = + BTreeSet::from_iter(vec![arbitrary_key(), arbitrary_key()]); + + let result = determine_check_type(&keys_changed); + + assert_matches!(result, Ok(None)); + } + + { + let keys_changed = BTreeSet::from_iter(vec![ + arbitrary_key(), + wrapped_erc20s::Keys::from( + &ethereum_events::testing::DAI_ERC20_ETH_ADDRESS, + ) + .supply(), + ]); + + let result = determine_check_type(&keys_changed); + + assert_matches!(result, Ok(None)); + } + + { + let keys_changed = BTreeSet::from_iter(vec![ + arbitrary_key(), + wrapped_erc20s::Keys::from( + &ethereum_events::testing::DAI_ERC20_ETH_ADDRESS, + ) + .balance( + &Address::decode(ARBITRARY_OWNER_A_ADDRESS) + .expect("Couldn't set up test"), + ), + ]); + + let result = determine_check_type(&keys_changed); + + assert_matches!(result, Ok(None)); + } + } + + #[test] + fn test_rejects_if_multitoken_keys_for_different_assets() { + { + let keys_changed = BTreeSet::from_iter(vec![ + wrapped_erc20s::Keys::from( + &ethereum_events::testing::DAI_ERC20_ETH_ADDRESS, + ) + .balance( + &Address::decode(ARBITRARY_OWNER_A_ADDRESS) + .expect("Couldn't set up test"), + ), + wrapped_erc20s::Keys::from( + &ethereum_events::testing::USDC_ERC20_ETH_ADDRESS, + ) + .balance( + &Address::decode(ARBITRARY_OWNER_B_ADDRESS) + .expect("Couldn't set up test"), + ), + ]); + + let result = determine_check_type(&keys_changed); + + assert_matches!(result, Ok(None)); + } + } + + #[test] + fn test_rejects_if_supply_key_changed() { + let asset = &ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; + { + let keys_changed = BTreeSet::from_iter(vec![ + wrapped_erc20s::Keys::from(asset).supply(), + wrapped_erc20s::Keys::from(asset).balance( + &Address::decode(ARBITRARY_OWNER_B_ADDRESS) + .expect("Couldn't set up test"), + ), + ]); + + let result = determine_check_type(&keys_changed); + + assert_matches!(result, Ok(None)); + } + } + + /// Test that escrowing Nam is accepted.
+ #[test] + fn test_escrow_nam_accepted() { + let mut writelog = WriteLog::default(); + let storage = setup_storage(); + // debit the user's balance + let account_key = balance_key( + &nam(), + &Address::decode(ARBITRARY_OWNER_A_ADDRESS).expect("Test failed"), + ); + writelog + .write( + &account_key, + Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + + // credit the balance to the escrow + let escrow_key = balance_key(&nam(), &eth_bridge::ADDRESS); + writelog + .write( + &escrow_key, + Amount::from( + BRIDGE_POOL_ESCROW_INITIAL_BALANCE + ESCROW_AMOUNT, + ) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + + let keys_changed = BTreeSet::from([account_key, escrow_key]); + let verifiers = BTreeSet::from([BRIDGE_POOL_ADDRESS]); + + // set up the VP + let tx = Tx::new(vec![], None); + let vp = EthBridge { + ctx: setup_ctx(&tx, &storage, &writelog, &keys_changed, &verifiers), + }; + + let res = vp.validate_tx( + &tx.try_to_vec().expect("Test failed"), + &keys_changed, + &verifiers, + ); + assert!(res.expect("Test failed")); + } + + /// Test that escrowing must increase the balance + #[test] + fn test_escrowed_nam_must_increase() { + let mut writelog = WriteLog::default(); + let storage = setup_storage(); + // debit the user's balance + let account_key = balance_key( + &nam(), + &Address::decode(ARBITRARY_OWNER_A_ADDRESS).expect("Test failed"), + ); + writelog + .write( + &account_key, + Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + + // do not credit the balance to the escrow + let escrow_key = balance_key(&nam(), &eth_bridge::ADDRESS); + writelog + .write( + &escrow_key, + Amount::from(BRIDGE_POOL_ESCROW_INITIAL_BALANCE) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + + let keys_changed = BTreeSet::from([account_key, escrow_key]); + let verifiers = BTreeSet::from([BRIDGE_POOL_ADDRESS]); + + // set up the VP + let tx = Tx::new(vec![], None); + let vp = EthBridge { + ctx: setup_ctx(&tx, &storage, &writelog, &keys_changed, &verifiers), + }; + + let res = vp.validate_tx( + &tx.try_to_vec().expect("Test failed"), + &keys_changed, + &verifiers, + ); + assert!(!res.expect("Test failed")); + } + + /// Test that the VP checks that the bridge pool vp will + /// be triggered if escrowing occurs.
+ #[test] + fn test_escrowing_must_trigger_bridge_pool_vp() { + let mut writelog = WriteLog::default(); + let storage = setup_storage(); + // debit the user's balance + let account_key = balance_key( + &nam(), + &Address::decode(ARBITRARY_OWNER_A_ADDRESS).expect("Test failed"), + ); + writelog + .write( + &account_key, + Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + + // credit the balance to the escrow + let escrow_key = balance_key(&nam(), &eth_bridge::ADDRESS); + writelog + .write( + &escrow_key, + Amount::from( + BRIDGE_POOL_ESCROW_INITIAL_BALANCE + ESCROW_AMOUNT, + ) + .try_to_vec() + .expect("Test failed"), + ) + .expect("Test failed"); + + let keys_changed = BTreeSet::from([account_key, escrow_key]); + let verifiers = BTreeSet::from([]); + + // set up the VP + let tx = Tx::new(vec![], None); + let vp = EthBridge { + ctx: setup_ctx(&tx, &storage, &writelog, &keys_changed, &verifiers), + }; + + let res = vp.validate_tx( + &tx.try_to_vec().expect("Test failed"), + &keys_changed, + &verifiers, + ); + assert!(!res.expect("Test failed")); + } +} diff --git a/shared/src/ledger/native_vp/mod.rs b/shared/src/ledger/native_vp/mod.rs index 8943c4f8d3..7b1be5678b 100644 --- a/shared/src/ledger/native_vp/mod.rs +++ b/shared/src/ledger/native_vp/mod.rs @@ -1,13 +1,15 @@ //! Native validity predicate interface associated with internal accounts such //! as the PoS and IBC modules. +pub mod ethereum_bridge; pub mod governance; pub mod parameters; pub mod slash_fund; - use std::cell::RefCell; use std::collections::BTreeSet; +use borsh::BorshDeserialize; +use eyre::WrapErr; pub use namada_core::ledger::vp_env::VpEnv; use super::storage_api::{self, ResultExt, StorageRead}; @@ -591,3 +593,103 @@ where self.post().iter_next(iter).map_err(Into::into) } } + +/// A convenience trait for reading and automatically deserializing a value from +/// storage +pub trait StorageReader { + /// If `maybe_bytes` is not empty, return an `Option` containing the + /// deserialization of the bytes inside `maybe_bytes`. + fn deserialize_if_present<T: BorshDeserialize>( + maybe_bytes: Option<Vec<u8>>, + ) -> eyre::Result<Option<T>> { + maybe_bytes + .map(|ref bytes| { + T::try_from_slice(bytes) + .wrap_err_with(|| "couldn't deserialize".to_string()) + }) + .transpose() + } + + /// Storage read prior state (before tx execution). It will try to read from + /// the storage. + fn read_pre_value<T: BorshDeserialize>( + &self, + key: &Key, + ) -> eyre::Result<Option<T>>; + + /// Storage read posterior state (after tx execution). It will try to read + /// from the write log first and if no entry found then from the + /// storage. + fn read_post_value<T: BorshDeserialize>( + &self, + key: &Key, + ) -> eyre::Result<Option<T>>; +}
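The `StorageReader` trait gives native VPs a mockable pre/post view of storage; the `FakeStorageReader` in the testing module below backs it with two in-memory maps. A dependency-free sketch of the same pattern, assuming toy string keys and a little-endian `u64` codec in place of `Key` and borsh:

```rust
use std::collections::HashMap;

// Sketch only: illustrative stand-ins for the real trait and types.
trait StorageReader {
    fn read_pre_value(&self, key: &str) -> Option<u64>;
    fn read_post_value(&self, key: &str) -> Option<u64>;
}

#[derive(Default)]
struct FakeReader {
    pre: HashMap<String, Vec<u8>>,
    post: HashMap<String, Vec<u8>>,
}

// decode a little-endian u64, standing in for borsh deserialization
fn decode(bytes: &[u8]) -> Option<u64> {
    let arr: [u8; 8] = bytes.try_into().ok()?;
    Some(u64::from_le_bytes(arr))
}

impl StorageReader for FakeReader {
    fn read_pre_value(&self, key: &str) -> Option<u64> {
        self.pre.get(key).and_then(|b| decode(b))
    }

    fn read_post_value(&self, key: &str) -> Option<u64> {
        self.post.get(key).and_then(|b| decode(b))
    }
}

fn main() {
    let mut reader = FakeReader::default();
    reader.pre.insert("balance/a".into(), 100u64.to_le_bytes().to_vec());
    reader.post.insert("balance/a".into(), 90u64.to_le_bytes().to_vec());
    // a VP can diff pre and post state without touching real storage
    assert_eq!(reader.read_pre_value("balance/a"), Some(100));
    assert_eq!(reader.read_post_value("balance/a"), Some(90));
}
```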
+ +impl<'a, DB, H, CA> StorageReader for &Ctx<'a, DB, H, CA> +where + DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ + /// Helper function. After reading posterior state, + /// borsh deserialize to specified type + fn read_post_value<T>(&self, key: &Key) -> eyre::Result<Option<T>> + where + T: BorshDeserialize, + { + let maybe_bytes = Ctx::read_bytes_post(self, key) + .wrap_err_with(|| format!("couldn't read_bytes_post {}", key))?; + Self::deserialize_if_present(maybe_bytes) + } + + /// Helper function. After reading prior state, + /// borsh deserialize to specified type + fn read_pre_value<T>(&self, key: &Key) -> eyre::Result<Option<T>> + where + T: BorshDeserialize, + { + let maybe_bytes = Ctx::read_bytes_pre(self, key) + .wrap_err_with(|| format!("couldn't read_bytes_pre {}", key))?; + Self::deserialize_if_present(maybe_bytes) + } +} + +#[cfg(any(test, feature = "testing"))] +pub(super) mod testing { + use std::collections::HashMap; + + use borsh::BorshDeserialize; + + use super::*; + + #[derive(Debug, Default)] + pub(in super::super) struct FakeStorageReader { + pre: HashMap<Key, Vec<u8>>, + post: HashMap<Key, Vec<u8>>, + } + + impl StorageReader for FakeStorageReader { + fn read_pre_value<T: BorshDeserialize>( + &self, + key: &Key, + ) -> eyre::Result<Option<T>> { + let bytes = match self.pre.get(key) { + Some(bytes) => bytes.to_owned(), + None => return Ok(None), + }; + Self::deserialize_if_present(Some(bytes)) + } + + fn read_post_value<T: BorshDeserialize>( + &self, + key: &Key, + ) -> eyre::Result<Option<T>> { + let bytes = match self.post.get(key) { + Some(bytes) => bytes.to_owned(), + None => return Ok(None), + }; + Self::deserialize_if_present(Some(bytes)) + } + } +} diff --git a/shared/src/ledger/pos/mod.rs b/shared/src/ledger/pos/mod.rs index 2878f89fb2..7a40e58110 100644 --- a/shared/src/ledger/pos/mod.rs +++ b/shared/src/ledger/pos/mod.rs @@ -2,8 +2,11 @@ pub mod vp; +use std::convert::TryFrom; + pub use namada_proof_of_stake; pub use namada_proof_of_stake::parameters::PosParams; +pub use namada_proof_of_stake::pos_queries::*; pub use namada_proof_of_stake::storage::*; pub use namada_proof_of_stake::types; use namada_proof_of_stake::PosBase; diff --git a/shared/src/ledger/pos/vp.rs b/shared/src/ledger/pos/vp.rs index e1b13648d0..fb1bcfae1b 100644 --- a/shared/src/ledger/pos/vp.rs +++ b/shared/src/ledger/pos/vp.rs @@ -5,6 +5,7 @@ use std::panic::{RefUnwindSafe, UnwindSafe}; use borsh::BorshDeserialize; use itertools::Itertools; +use namada_core::ledger::vp_env::VpEnv; pub use namada_proof_of_stake; pub use namada_proof_of_stake::parameters::PosParams; pub use namada_proof_of_stake::types::{self, Slash, Slashes, ValidatorStates}; @@ -30,6 +31,7 @@ use crate::ledger::pos::{ is_validator_address_raw_hash_key, is_validator_commission_rate_key, is_validator_consensus_key_key, is_validator_max_commission_rate_change_key, is_validator_state_key, + validator_eth_cold_key_key, validator_eth_hot_key_key, }; use crate::ledger::storage::{self as ledger_storage, StorageHasher}; use crate::ledger::storage_api::StorageRead; @@ -128,18 +130,18 @@ where ) .map_err(Error::NativeVpError); } else if is_validator_set_key(key) { - let pre = self.ctx.pre().read_bytes(key)?.and_then(|bytes| { + let pre = self.ctx.read_bytes_pre(key)?.and_then(|bytes| { ValidatorSets::try_from_slice(&bytes[..]).ok() }); - let post = self.ctx.post().read_bytes(key)?.and_then(|bytes| { + let post = self.ctx.read_bytes_post(key)?.and_then(|bytes| { ValidatorSets::try_from_slice(&bytes[..]).ok() }); changes.push(ValidatorSet(Data { pre, post })); } else if let Some(validator) = is_validator_state_key(key) { - let pre = self.ctx.pre().read_bytes(key)?.and_then(|bytes| { + let pre = self.ctx.read_bytes_pre(key)?.and_then(|bytes| { ValidatorStates::try_from_slice(&bytes[..]).ok() }); - let post = self.ctx.post().read_bytes(key)?.and_then(|bytes| { + let post = self.ctx.read_bytes_post(key)?.and_then(|bytes| { ValidatorStates::try_from_slice(&bytes[..]).ok() }); changes.push(Validator { @@ -148,10 +150,10 @@ where }); } else if let Some(validator) = is_validator_consensus_key_key(key) { - let pre =
self.ctx.pre().read_bytes(key)?.and_then(|bytes| { + let pre = self.ctx.read_bytes_pre(key)?.and_then(|bytes| { ValidatorConsensusKeys::try_from_slice(&bytes[..]).ok() }); - let post = self.ctx.post().read_bytes(key)?.and_then(|bytes| { + let post = self.ctx.read_bytes_post(key)?.and_then(|bytes| { ValidatorConsensusKeys::try_from_slice(&bytes[..]).ok() }); changes.push(Validator { @@ -172,14 +174,14 @@ where } else if let Some(raw_hash) = is_validator_address_raw_hash_key(key) { - let pre = - self.ctx.pre().read_bytes(key)?.and_then(|bytes| { - Address::try_from_slice(&bytes[..]).ok() - }); - let post = - self.ctx.post().read_bytes(key)?.and_then(|bytes| { - Address::try_from_slice(&bytes[..]).ok() - }); + let pre = self + .ctx + .read_bytes_pre(key)? + .and_then(|bytes| Address::try_from_slice(&bytes[..]).ok()); + let post = self + .ctx + .read_bytes_post(key)? + .and_then(|bytes| Address::try_from_slice(&bytes[..]).ok()); changes.push(ValidatorAddressRawHash { raw_hash: raw_hash.to_string(), data: Data { pre, post }, @@ -190,27 +192,26 @@ where if owner != &addr { continue; } - let pre = self.ctx.pre().read_bytes(key)?.and_then(|bytes| { + let pre = self.ctx.read_bytes_pre(key)?.and_then(|bytes| { token::Amount::try_from_slice(&bytes[..]).ok() }); - let post = self.ctx.post().read_bytes(key)?.and_then(|bytes| { + let post = self.ctx.read_bytes_post(key)?.and_then(|bytes| { token::Amount::try_from_slice(&bytes[..]).ok() }); changes.push(Balance(Data { pre, post })); } else if let Some(bond_id) = is_bond_key(key) { - let pre = - self.ctx.pre().read_bytes(key)?.and_then(|bytes| { - Bonds::try_from_slice(&bytes[..]).ok() - }); - let post = - self.ctx.post().read_bytes(key)?.and_then(|bytes| { - Bonds::try_from_slice(&bytes[..]).ok() - }); + let pre = self + .ctx + .read_bytes_pre(key)? + .and_then(|bytes| Bonds::try_from_slice(&bytes[..]).ok()); + let post = self + .ctx + .read_bytes_post(key)? + .and_then(|bytes| Bonds::try_from_slice(&bytes[..]).ok()); // For bonds, we need to look-up slashes let slashes = self .ctx - .pre() - .read_bytes(&validator_slashes_key(&bond_id.validator))? + .read_bytes_pre(&validator_slashes_key(&bond_id.validator))? .and_then(|bytes| Slashes::try_from_slice(&bytes[..]).ok()) .unwrap_or_default(); changes.push(Bond { @@ -219,19 +220,20 @@ where slashes, }); } else if let Some(unbond_id) = is_unbond_key(key) { - let pre = - self.ctx.pre().read_bytes(key)?.and_then(|bytes| { - Unbonds::try_from_slice(&bytes[..]).ok() - }); - let post = - self.ctx.post().read_bytes(key)?.and_then(|bytes| { - Unbonds::try_from_slice(&bytes[..]).ok() - }); + let pre = self + .ctx + .read_bytes_pre(key)? + .and_then(|bytes| Unbonds::try_from_slice(&bytes[..]).ok()); + let post = self + .ctx + .read_bytes_post(key)? + .and_then(|bytes| Unbonds::try_from_slice(&bytes[..]).ok()); // For unbonds, we need to look-up slashes let slashes = self .ctx - .pre() - .read_bytes(&validator_slashes_key(&unbond_id.validator))? + .read_bytes_pre(&validator_slashes_key( + &unbond_id.validator, + ))? .and_then(|bytes| Slashes::try_from_slice(&bytes[..]).ok()) .unwrap_or_default(); changes.push(Unbond { diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index edb801700b..244db4f4b9 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -1,13 +1,15 @@ //! 
The ledger's protocol + use std::collections::BTreeSet; use std::panic; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use thiserror::Error; -use crate::ledger::eth_bridge::vp::EthBridge; use crate::ledger::gas::{self, BlockGasMeter, VpGasMeter}; use crate::ledger::ibc::vp::{Ibc, IbcToken}; +use crate::ledger::native_vp::ethereum_bridge::bridge_pool_vp::BridgePoolVp; +use crate::ledger::native_vp::ethereum_bridge::vp::EthBridge; use crate::ledger::native_vp::governance::GovernanceVp; use crate::ledger::native_vp::parameters::{self, ParametersVp}; use crate::ledger::native_vp::slash_fund::SlashFundVp; @@ -19,6 +21,7 @@ use crate::proto::{self, Tx}; use crate::types::address::{Address, InternalAddress}; use crate::types::storage; use crate::types::storage::TxIndex; +use crate::types::transaction::protocol::{ProtocolTx, ProtocolTxType}; use crate::types::transaction::{DecryptedTx, TxResult, TxType, VpsResult}; use crate::vm::wasm::{TxCache, VpCache}; use crate::vm::{self, wasm, WasmCacheAccess}; @@ -32,6 +35,8 @@ pub enum Error { TxDecodingError(proto::Error), #[error("Transaction runner error: {0}")] TxRunnerError(vm::wasm::run::Error), + #[error(transparent)] + ProtocolTxError(#[from] eyre::Error), #[error("Txs must either be encrypted or a decryption of an encrypted tx")] TxTypeError, #[error("Gas error: {0}")] @@ -55,87 +60,198 @@ pub enum Error { #[error("SlashFund native VP error: {0}")] SlashFundNativeVpError(crate::ledger::native_vp::slash_fund::Error), #[error("Ethereum bridge native VP error: {0}")] - EthBridgeNativeVpError(crate::ledger::eth_bridge::vp::Error), + EthBridgeNativeVpError(native_vp::ethereum_bridge::vp::Error), + #[error("Ethereum bridge pool native VP error: {0}")] + BridgePoolNativeVpError(native_vp::ethereum_bridge::bridge_pool_vp::Error), #[error("Access to an internal address {0} is forbidden")] AccessForbidden(InternalAddress), } +#[allow(missing_docs)] +pub struct ShellParams<'a, D, H, CA> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, +{ + pub block_gas_meter: &'a mut BlockGasMeter, + pub write_log: &'a mut WriteLog, + pub storage: &'a Storage<D, H>, + pub vp_wasm_cache: &'a mut VpCache<CA>, + pub tx_wasm_cache: &'a mut TxCache<CA>, +} + /// Result of applying a transaction pub type Result<T> = std::result::Result<T, Error>;
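`ShellParams` bundles the five mutable borrows every tx application needs, so call sites pass one value and callees can destructure it back into individual borrows directly in the function signature. A toy sketch of that pattern (illustrative field names only):

```rust
// Sketch only: a borrowed-parameter bundle destructured in a signature.
struct Params<'a> {
    gas_meter: &'a mut u64,
    write_log: &'a mut Vec<String>,
}

fn apply(Params { gas_meter, write_log }: Params<'_>, cost: u64, key: &str) {
    *gas_meter += cost; // meter some gas
    write_log.push(key.to_string()); // record the touched key
}

fn main() {
    let (mut gas, mut log) = (0u64, Vec::new());
    apply(Params { gas_meter: &mut gas, write_log: &mut log }, 10, "balance/a");
    apply(Params { gas_meter: &mut gas, write_log: &mut log }, 5, "balance/b");
    assert_eq!(gas, 15);
    assert_eq!(log, vec!["balance/a", "balance/b"]);
}
```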
-/// Apply a given transaction -/// -/// The only Tx Types that should be input here are `Decrypted` and `Wrapper` +/// Dispatch a given transaction to be applied based on its type. Some storage +/// updates may be derived and applied natively rather than via the wasm +/// environment, in which case validity predicates will be bypassed. /// /// If the given tx is a successfully decrypted payload, apply the necessary /// vps. Otherwise, we include the tx on chain with the gas charge added /// but no further validations. #[allow(clippy::too_many_arguments)] -pub fn apply_tx( - tx: TxType, +pub fn dispatch_tx<'a, D, H, CA>( + tx_type: TxType, tx_length: usize, tx_index: TxIndex, - block_gas_meter: &mut BlockGasMeter, - write_log: &mut WriteLog, - storage: &Storage<D, H>, - vp_wasm_cache: &mut VpCache<CA>, - tx_wasm_cache: &mut TxCache<CA>, + block_gas_meter: &'a mut BlockGasMeter, + write_log: &'a mut WriteLog, + storage: &'a mut Storage<D, H>, + vp_wasm_cache: &'a mut VpCache<CA>, + tx_wasm_cache: &'a mut TxCache<CA>, ) -> Result<TxResult> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, { - // Base gas cost for applying the tx - block_gas_meter - .add_base_transaction_fee(tx_length) - .map_err(Error::GasError)?; - match tx { + match tx_type { TxType::Raw(_) => Err(Error::TxTypeError), - TxType::Decrypted(DecryptedTx::Decrypted(tx)) => { - let verifiers = execute_tx( - &tx, - &tx_index, - storage, + TxType::Decrypted(DecryptedTx::Decrypted(tx)) => apply_wasm_tx( + tx, + tx_length, + &tx_index, + ShellParams { block_gas_meter, write_log, + storage, vp_wasm_cache, tx_wasm_cache, - )?; + }, + ), + TxType::Protocol(ProtocolTx { tx, .. }) => { + apply_protocol_tx(tx, storage) + } + _ => { + // other transaction types we treat as a noop + Ok(TxResult::default()) + } + } +} - let vps_result = check_vps( - &tx, - &tx_index, - storage, - block_gas_meter, - write_log, - &verifiers, - vp_wasm_cache, - )?; +/// Apply a transaction going via the wasm environment. Gas will be metered and +/// validity predicates will be triggered in the normal way. +pub(crate) fn apply_wasm_tx<'a, D, H, CA>( + tx: Tx, + tx_length: usize, + tx_index: &TxIndex, + ShellParams { + block_gas_meter, + write_log, + storage, + vp_wasm_cache, + tx_wasm_cache, + }: ShellParams<'a, D, H, CA>, +) -> Result<TxResult> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, +{ + // Base gas cost for applying the tx + block_gas_meter + .add_base_transaction_fee(tx_length) + .map_err(Error::GasError)?; + let verifiers = execute_tx( + &tx, + tx_index, + storage, + block_gas_meter, + write_log, + vp_wasm_cache, + tx_wasm_cache, + )?; + + let vps_result = check_vps( + &tx, + tx_index, + storage, + block_gas_meter, + write_log, + &verifiers, + vp_wasm_cache, + )?; - let gas_used = block_gas_meter - .finalize_transaction() - .map_err(Error::GasError)?; - let initialized_accounts = write_log.get_initialized_accounts(); - let changed_keys = write_log.get_keys(); - let ibc_event = write_log.take_ibc_event(); + let gas_used = block_gas_meter + .finalize_transaction() + .map_err(Error::GasError)?; + let initialized_accounts = write_log.get_initialized_accounts(); + let changed_keys = write_log.get_keys(); + let ibc_event = write_log.take_ibc_event(); + + Ok(TxResult { + gas_used, + changed_keys, + vps_result, + initialized_accounts, + ibc_event, + }) +} - Ok(TxResult { - gas_used, - changed_keys, - vps_result, - initialized_accounts, - ibc_event, - })
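The dispatch split introduced here has three outcomes: wasm transactions take the gas-metered, VP-checked path; protocol transactions apply state natively; anything else is a noop. A self-contained mirror of that shape, with toy types that are not the namada ones:

```rust
// Sketch only: hypothetical tx kinds and outcomes.
enum TxKind {
    Wasm(Vec<u8>),
    Protocol(&'static str),
    Other,
}

#[derive(Debug, Default, PartialEq)]
struct Outcome {
    gas_used: u64,
    changed_keys: Vec<String>,
}

fn dispatch(tx: TxKind) -> Result<Outcome, String> {
    match tx {
        TxKind::Wasm(code) => Ok(Outcome {
            // metered path; validity predicates would also run here
            gas_used: 10 + code.len() as u64,
            changed_keys: vec!["wasm/key".into()],
        }),
        TxKind::Protocol(event) => Ok(Outcome {
            // native path: deterministic update, no gas, VPs bypassed
            gas_used: 0,
            changed_keys: vec![format!("protocol/{}", event)],
        }),
        TxKind::Other => Ok(Outcome::default()), // treated as a noop
    }
}

fn main() {
    let r = dispatch(TxKind::Protocol("eth_event")).unwrap();
    assert_eq!(r.gas_used, 0);
    assert_eq!(dispatch(TxKind::Other).unwrap(), Outcome::default());
}
```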
+/// Apply a derived transaction to storage based on some protocol transaction. +/// The logic here must be completely deterministic and will be executed by all +/// full nodes every time a protocol transaction is included in a block. Storage +/// is updated natively rather than via the wasm environment, so gas does not +/// need to be metered and validity predicates are bypassed. A [`TxResult`] +/// containing changed keys and the like should be returned in the normal way. +pub(crate) fn apply_protocol_tx<D, H>( + tx: ProtocolTxType, + storage: &mut Storage<D, H>, +) -> Result<TxResult> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + use namada_ethereum_bridge::protocol::transactions; + + use crate::types::vote_extensions::{ + ethereum_events, validator_set_update, + }; + + match tx { + ProtocolTxType::EthEventsVext(ext) => { + let ethereum_events::VextDigest { events, .. } = + ethereum_events::VextDigest::singleton(ext); + transactions::ethereum_events::apply_derived_tx(storage, events) + .map_err(Error::ProtocolTxError) + } + ProtocolTxType::ValSetUpdateVext(ext) => { + // NOTE(feature = "abcipp"): we will not need to apply any + // storage changes when we rollback to ABCI++; this is because + // the decided vote extension digest should have >2/3 of the + // voting power already, which is the whole reason why we + // have to apply state updates with `abciplus` - we need + // to aggregate votes consisting of >2/3 of the voting power + // on a validator set update. + // + // we could, however, emit some kind of event, notifying a + // relayer process of a newly available validator set update; + // for this, we need to receive a mutable reference to the + // event log, in `apply_protocol_tx()` + transactions::validator_set_update::aggregate_votes( + storage, + validator_set_update::VextDigest::singleton(ext), + ) + .map_err(Error::ProtocolTxError) + } + ProtocolTxType::EthereumEvents(_) + | ProtocolTxType::ValidatorSetUpdate(_) => { + // TODO(namada#198): implement this + tracing::warn!( + "Attempt made to apply an unimplemented protocol transaction, \ + no actions will be taken" + ); + Ok(TxResult::default()) } _ => { - let gas_used = block_gas_meter - .finalize_transaction() - .map_err(Error::GasError)?; - Ok(TxResult { - gas_used, - ..Default::default() - }) + tracing::error!( + "Attempt made to apply an unsupported protocol transaction! - \ + {:#?}", tx ); + Err(Error::TxTypeError) } } }
@@ -370,6 +486,14 @@ where gas_meter = bridge.ctx.gas_meter.into_inner(); result } + InternalAddress::EthBridgePool => { + let bridge_pool = BridgePoolVp { ctx }; + let result = bridge_pool + .validate_tx(tx_data, &keys_changed, &verifiers) + .map_err(Error::BridgePoolNativeVpError); + gas_meter = bridge_pool.ctx.gas_meter.into_inner(); + result + } }; accepted diff --git a/shared/src/ledger/queries/mod.rs b/shared/src/ledger/queries/mod.rs index 4644909b1a..107ff91a98 100644 --- a/shared/src/ledger/queries/mod.rs +++ b/shared/src/ledger/queries/mod.rs @@ -9,7 +9,8 @@ pub use types::{ }; use vp::{Vp, VP}; -use super::storage::{DBIter, StorageHasher, DB}; +use super::storage::traits::StorageHasher; +use super::storage::{DBIter, DB}; use super::storage_api; use crate::types::storage::BlockHeight; @@ -238,8 +239,11 @@ mod testing { tx_wasm_cache: self.tx_wasm_cache.clone(), storage_read_past_height_limit: None, }; - let response = self.rpc.handle(ctx, &request).unwrap(); - Ok(response) + // TODO: this is a hack to propagate errors to the caller, we should + // really permit error types other than [`std::io::Error`] + self.rpc.handle(ctx, &request).map_err(|err| { + std::io::Error::new(std::io::ErrorKind::Other, err.to_string()) + }) + } + } +}
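The TODO above flattens richer errors because the test client's interface only admits `std::io::Error`. The concrete error type is lost but its message survives, which is enough for assertions in tests. A minimal sketch of that workaround:

```rust
use std::io;

// Any Display-able error can be smuggled through an io::Error-only interface.
fn to_io_error(err: impl std::fmt::Display) -> io::Error {
    io::Error::new(io::ErrorKind::Other, err.to_string())
}

fn main() {
    let res: Result<i32, io::Error> = "not a number".parse::<i32>().map_err(to_io_error);
    let err = res.unwrap_err();
    assert_eq!(err.kind(), io::ErrorKind::Other);
    println!("{}", err); // the original parse error's message survives
}
```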
( "applied" / [tx_hash: Hash] ) -> Option = applied, + // Get the current contents of the Ethereum bridge pool + ( "eth_bridge_pool" / "contents" ) + -> Vec = read_ethereum_bridge_pool, + + // Generate a merkle proof for the inclusion of requested + // transfers in the Ethereum bridge pool + ( "eth_bridge_pool" / "proof" ) + -> EncodeCell = (with_options generate_bridge_pool_proof), + } // Handlers: @@ -70,25 +89,25 @@ where H: 'static + StorageHasher + Sync, { use crate::ledger::gas::BlockGasMeter; - use crate::ledger::protocol; + use crate::ledger::protocol::{self, ShellParams}; use crate::ledger::storage::write_log::WriteLog; use crate::proto::Tx; use crate::types::storage::TxIndex; - use crate::types::transaction::{DecryptedTx, TxType}; let mut gas_meter = BlockGasMeter::default(); let mut write_log = WriteLog::default(); let tx = Tx::try_from(&request.data[..]).into_storage_result()?; - let tx = TxType::Decrypted(DecryptedTx::Decrypted(tx)); - let data = protocol::apply_tx( + let data = protocol::apply_wasm_tx( tx, request.data.len(), - TxIndex(0), - &mut gas_meter, - &mut write_log, - ctx.storage, - &mut ctx.vp_wasm_cache, - &mut ctx.tx_wasm_cache, + &TxIndex(0), + ShellParams { + block_gas_meter: &mut gas_meter, + write_log: &mut write_log, + storage: ctx.storage, + vp_wasm_cache: &mut ctx.vp_wasm_cache, + tx_wasm_cache: &mut ctx.tx_wasm_cache, + }, ) .into_storage_result()?; let data = data.try_to_vec().into_storage_result()?; @@ -259,7 +278,7 @@ where let proof = if request.prove { let mut ops = vec![]; for PrefixValue { key, value } in &data { - let mut proof: crate::tendermint::merkle::proof::Proof = ctx + let mut proof: Proof = ctx .storage .get_existence_proof(key, value, request.height) .into_storage_result()?; @@ -325,18 +344,170 @@ where .cloned()) } +/// Read the current contents of the Ethereum bridge +/// pool. +fn read_ethereum_bridge_pool( + ctx: RequestCtx<'_, D, H>, +) -> storage_api::Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let stores = ctx + .storage + .db + .read_merkle_tree_stores(ctx.storage.last_height) + .expect("We should always be able to read the database") + .expect( + "Every signed root should correspond to an existing block height", + ); + let store = match stores.get_store(StoreType::BridgePool) { + StoreRef::BridgePool(store) => store, + _ => unreachable!(), + }; + + let transfers: Vec = store + .iter() + .map(|hash| { + let res = ctx + .storage + .read(&get_key_from_hash(hash)) + .unwrap() + .0 + .unwrap(); + BorshDeserialize::try_from_slice(res.as_slice()).unwrap() + }) + .collect(); + Ok(transfers) +} + +/// Generate a merkle proof for the inclusion of the +/// requested transfers in the Ethereum bridge pool. 
+/// Generate a merkle proof for the inclusion of the +/// requested transfers in the Ethereum bridge pool. +fn generate_bridge_pool_proof<D, H>( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, +) -> storage_api::Result<EncodedResponseQuery> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + if let Ok(transfer_hashes) = + <Vec<KeccakHash>>::try_from_slice(request.data.as_slice()) + { + // get the latest signed merkle root of the Ethereum bridge pool + let signed_root: MultiSignedMerkleRoot = match ctx + .storage + .read(&get_signed_root_key()) + .expect("Reading the database should not fail") + { + (Some(bytes), _) => { + BorshDeserialize::try_from_slice(bytes.as_slice()).unwrap() + } + _ => { + return Err(storage_api::Error::SimpleMessage( + "No signed root for the Ethereum bridge pool exists in \ + storage.", + )); + } + }; + + // get the merkle tree corresponding to the above root. + let tree = MerkleTree::<H>::new( + ctx.storage + .db + .read_merkle_tree_stores(signed_root.height) + .expect("We should always be able to read the database") + .expect( + "Every signed root should correspond to an existing block \ + height", + ), + ); + // from the hashes of the transfers, get the actual values. + let mut missing_hashes = vec![]; + let (keys, values): (Vec<_>, Vec<_>) = transfer_hashes + .iter() + .filter_map(|hash| { + let key = get_key_from_hash(hash); + match ctx.storage.read(&key) { + Ok((Some(bytes), _)) => Some((key, bytes)), + _ => { + missing_hashes.push(hash); + None + } + } + }) + .unzip(); + if !missing_hashes.is_empty() { + return Err(storage_api::Error::Custom(CustomError( + format!( + "One or more of the provided hashes had no corresponding \ + transfer in storage: {:?}", + missing_hashes + ) + .into(), + ))); + } + // get the membership proof + match tree.get_sub_tree_existence_proof( + &keys, + values.iter().map(|v| v.as_slice()).collect(), + ) { + Ok(BridgePool(proof)) => { + let data = EncodeCell::new(&RelayProof { + // TODO: use actual validators + validator_args: Default::default(), + root: signed_root, + proof, + // TODO: Use real nonce + nonce: 0.into(), + }) + .try_to_vec() + .into_storage_result()?; + Ok(EncodedResponseQuery { + data, + ..Default::default() + }) + } + Err(e) => Err(storage_api::Error::new(e)), + _ => unreachable!(), + } + } else { + Err(storage_api::Error::SimpleMessage( + "Could not deserialize transfers", + )) + } +} + #[cfg(test)] mod test { - use borsh::BorshDeserialize; + use std::collections::BTreeSet; + + use borsh::{BorshDeserialize, BorshSerialize}; + use namada_core::ledger::eth_bridge::storage::bridge_pool::{ + get_pending_key, get_signed_root_key, BridgePoolTree, + }; use crate::ledger::queries::testing::TestClient; use crate::ledger::queries::RPC; use crate::ledger::storage_api::{self, StorageWrite}; use crate::proto::Tx; + use crate::types::address::Address; + use crate::types::eth_abi::Encode; + use crate::types::eth_bridge_pool::{ + GasFee, MultiSignedMerkleRoot, PendingTransfer, RelayProof, + TransferToEthereum, + }; + use crate::types::ethereum_events::EthAddress; use crate::types::{address, token}; const TX_NO_OP_WASM: &str = "../wasm_for_tests/tx_no_op.wasm"; + /// An established user address for testing & development + fn bertha_address() -> Address { + Address::decode("atest1v4ehgw36xvcyyvejgvenxs34g3zygv3jxqunjd6rxyeyys3sxy6rwvfkx4qnj33hg9qnvse4lsfctw") + .expect("The token address decoding shouldn't fail") + } + #[test] fn test_shell_queries_router_paths() { let path = RPC.shell().epoch_path(); @@ -442,4 +613,279 @@ mod test { Ok(()) } + + /// Test that reading the bridge pool works + #[tokio::test] + async fn
test_read_bridge_pool() { + let mut client = TestClient::new(RPC); + + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([0; 20]), + recipient: EthAddress([0; 20]), + sender: bertha_address(), + amount: 0.into(), + nonce: 0.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + }; + + // write a transfer into the bridge pool + client + .storage + .write( + &get_pending_key(&transfer), + transfer.try_to_vec().expect("Test failed"), + ) + .expect("Test failed"); + + // commit the changes and increase block height + client.storage.commit().expect("Test failed"); + client.storage.block.height = client.storage.block.height + 1; + + // check the response + let pool = RPC + .shell() + .read_ethereum_bridge_pool(&client) + .await + .unwrap(); + assert_eq!(pool, Vec::from([transfer])); + } + + /// Test that reading the bridge pool always gets + /// the latest pool + #[tokio::test] + async fn test_bridge_pool_updates() { + let mut client = TestClient::new(RPC); + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([0; 20]), + recipient: EthAddress([0; 20]), + sender: bertha_address(), + amount: 0.into(), + nonce: 0.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + }; + + // write a transfer into the bridge pool + client + .storage + .write( + &get_pending_key(&transfer), + transfer.try_to_vec().expect("Test failed"), + ) + .expect("Test failed"); + + // commit the changes and increase block height + client.storage.commit().expect("Test failed"); + client.storage.block.height = client.storage.block.height + 1; + + // update the pool + client + .storage + .delete(&get_pending_key(&transfer)) + .expect("Test failed"); + let mut transfer2 = transfer; + transfer2.transfer.amount = 1.into(); + client + .storage + .write( + &get_pending_key(&transfer2), + transfer2.try_to_vec().expect("Test failed"), + ) + .expect("Test failed"); + + // commit the changes and increase block height + client.storage.commit().expect("Test failed"); + client.storage.block.height = client.storage.block.height + 1; + + // check the response + let pool = RPC + .shell() + .read_ethereum_bridge_pool(&client) + .await + .unwrap(); + assert_eq!(pool, Vec::from([transfer2])); + } + + /// Test that we can get a merkle proof even if the signed + /// merkle roots is lagging behind the pool + #[tokio::test] + async fn test_get_merkle_proof() { + let mut client = TestClient::new(RPC); + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([0; 20]), + recipient: EthAddress([0; 20]), + sender: bertha_address(), + amount: 0.into(), + nonce: 0.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + }; + + // write a transfer into the bridge pool + client + .storage + .write( + &get_pending_key(&transfer), + transfer.try_to_vec().expect("Test failed"), + ) + .expect("Test failed"); + + // create a signed Merkle root for this pool + let signed_root = MultiSignedMerkleRoot { + sigs: Default::default(), + root: transfer.keccak256(), + height: Default::default(), + }; + + // commit the changes and increase block height + client.storage.commit().expect("Test failed"); + client.storage.block.height = client.storage.block.height + 1; + + // update the pool + let mut transfer2 = transfer.clone(); + transfer2.transfer.amount = 1.into(); + client + .storage + .write( + &get_pending_key(&transfer2), + transfer2.try_to_vec().expect("Test failed"), + ) + 
.expect("Test failed"); + + // add the signature for the pool at the previous block height + client + .storage + .write(&get_signed_root_key(), signed_root.try_to_vec().unwrap()) + .expect("Test failed"); + + // commit the changes and increase block height + client.storage.commit().expect("Test failed"); + client.storage.block.height = client.storage.block.height + 1; + + let resp = RPC + .shell() + .generate_bridge_pool_proof( + &client, + Some( + vec![transfer.keccak256()] + .try_to_vec() + .expect("Test failed"), + ), + None, + false, + ) + .await + .unwrap(); + + let tree = BridgePoolTree::new( + transfer.keccak256(), + BTreeSet::from([transfer.keccak256()]), + ); + let proof = tree + .get_membership_proof(vec![transfer]) + .expect("Test failed"); + + let proof = RelayProof { + validator_args: Default::default(), + root: signed_root, + proof, + // TODO: Use a real nonce + nonce: 0.into(), + } + .encode() + .into_inner(); + assert_eq!(proof, resp.data.into_inner()); + } + + /// Test if the no merkle tree including a transfer + /// has had its root signed, then we cannot generate + /// a proof. + #[tokio::test] + async fn test_cannot_get_proof() { + let mut client = TestClient::new(RPC); + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: EthAddress([0; 20]), + recipient: EthAddress([0; 20]), + sender: bertha_address(), + amount: 0.into(), + nonce: 0.into(), + }, + gas_fee: GasFee { + amount: 0.into(), + payer: bertha_address(), + }, + }; + + // write a transfer into the bridge pool + client + .storage + .write( + &get_pending_key(&transfer), + transfer.try_to_vec().expect("Test failed"), + ) + .expect("Test failed"); + + // create a signed Merkle root for this pool + let signed_root = MultiSignedMerkleRoot { + sigs: Default::default(), + root: transfer.keccak256(), + height: Default::default(), + }; + + // commit the changes and increase block height + client.storage.commit().expect("Test failed"); + client.storage.block.height = client.storage.block.height + 1; + + // update the pool + let mut transfer2 = transfer; + transfer2.transfer.amount = 1.into(); + client + .storage + .write( + &get_pending_key(&transfer2), + transfer2.try_to_vec().expect("Test failed"), + ) + .expect("Test failed"); + + // add the signature for the pool at the previous block height + client + .storage + .write(&get_signed_root_key(), signed_root.try_to_vec().unwrap()) + .expect("Test failed"); + + // commit the changes and increase block height + client.storage.commit().expect("Test failed"); + client.storage.block.height = client.storage.block.height + 1; + + // this is in the pool, but its merkle root has not been signed yet + let resp = RPC + .shell() + .generate_bridge_pool_proof( + &client, + Some( + vec![transfer2.keccak256()] + .try_to_vec() + .expect("Test failed"), + ), + None, + false, + ) + .await; + // thus proof generation should fail + assert!(resp.is_err()); + } } diff --git a/shared/src/ledger/storage/write_log.rs b/shared/src/ledger/storage/write_log.rs index 6e78612f56..667164eb40 100644 --- a/shared/src/ledger/storage/write_log.rs +++ b/shared/src/ledger/storage/write_log.rs @@ -6,7 +6,8 @@ use std::collections::{BTreeSet, HashMap, HashSet}; use thiserror::Error; use crate::ledger; -use crate::ledger::storage::{Storage, StorageHasher}; +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::Storage; use crate::types::address::{Address, EstablishedAddressGen}; use crate::types::ibc::IbcEvent; use crate::types::storage; diff --git 
a/shared/src/ledger/vp_host_fns.rs b/shared/src/ledger/vp_host_fns.rs index 8fc013075a..16c85b09b4 100644 --- a/shared/src/ledger/vp_host_fns.rs +++ b/shared/src/ledger/vp_host_fns.rs @@ -12,8 +12,9 @@ use thiserror::Error; use super::gas::MIN_STORAGE_GAS; use crate::ledger::gas; use crate::ledger::gas::VpGasMeter; +use crate::ledger::storage::traits::StorageHasher; use crate::ledger::storage::write_log::WriteLog; -use crate::ledger::storage::{self, write_log, Storage, StorageHasher}; +use crate::ledger::storage::{self, write_log, Storage}; use crate::proto::Tx; /// These runtime errors will abort VP execution immediately diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs index 1b73329efe..1832e51ce9 100644 --- a/shared/src/types/mod.rs +++ b/shared/src/types/mod.rs @@ -4,6 +4,7 @@ pub mod ibc; pub mod key; pub use namada_core::types::{ - address, chain, governance, hash, internal, masp, storage, time, token, - transaction, validity_predicate, + address, chain, eth_abi, eth_bridge_pool, ethereum_events, governance, + hash, internal, keccak, masp, storage, time, token, transaction, + validity_predicate, vote_extensions, voting_power, }; diff --git a/shared/src/vm/host_env.rs b/shared/src/vm/host_env.rs index 90e6e4405f..a452f52d7a 100644 --- a/shared/src/vm/host_env.rs +++ b/shared/src/vm/host_env.rs @@ -1822,22 +1822,26 @@ where EVAL: VpEvaluator<Db = DB, H = H, Eval = EVAL, CA = CA>, CA: WasmCacheAccess, { - use masp_primitives::transaction::Transaction; - use crate::types::token::Transfer; + let gas_meter = unsafe { env.ctx.gas_meter.get() }; let (tx_bytes, gas) = env .memory .read_bytes(tx_ptr, tx_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; vp_host_fns::add_gas(gas_meter, gas)?; + let full_tx: Transfer = - BorshDeserialize::try_from_slice(tx_bytes.as_slice()).unwrap(); - let shielded_tx: Transaction = full_tx.shielded.unwrap(); - Ok(HostEnvResult::from(crate::ledger::masp::verify_shielded_tx( - &shielded_tx, - )) - .to_i64()) + BorshDeserialize::try_from_slice(tx_bytes.as_slice()) + .map_err(vp_host_fns::RuntimeError::EncodingError)?; + + match full_tx.shielded { + Some(shielded_tx) => Ok(HostEnvResult::from( + crate::ledger::masp::verify_shielded_tx(&shielded_tx), + ) + .to_i64()), + None => Ok(HostEnvResult::Fail.to_i64()), + } } /// Log a string from exposed to the wasm VM Tx environment. The message will be
The message will be @@ -1963,7 +1967,8 @@ pub mod testing { use std::collections::BTreeSet; use super::*; - use crate::ledger::storage::{self, StorageHasher}; + use crate::ledger::storage::traits::StorageHasher; + use crate::ledger::storage::{self}; use crate::vm::memory::testing::NativeMemory; /// Setup a transaction environment diff --git a/shared/src/vm/wasm/host_env.rs b/shared/src/vm/wasm/host_env.rs index e4e173f47c..b355210a5b 100644 --- a/shared/src/vm/wasm/host_env.rs +++ b/shared/src/vm/wasm/host_env.rs @@ -8,7 +8,8 @@ use wasmer::{ WasmerEnv, }; -use crate::ledger::storage::{self, StorageHasher}; +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{self}; use crate::vm::host_env::{TxVmEnv, VpEvaluator, VpVmEnv}; use crate::vm::wasm::memory::WasmMemory; use crate::vm::{host_env, WasmCacheAccess}; diff --git a/shared/src/vm/wasm/run.rs b/shared/src/vm/wasm/run.rs index 103f92a75c..204d08aa21 100644 --- a/shared/src/vm/wasm/run.rs +++ b/shared/src/vm/wasm/run.rs @@ -11,8 +11,9 @@ use wasmer::BaseTunables; use super::memory::{Limit, WasmMemory}; use super::TxCache; use crate::ledger::gas::{BlockGasMeter, VpGasMeter}; +use crate::ledger::storage::traits::StorageHasher; use crate::ledger::storage::write_log::WriteLog; -use crate::ledger::storage::{self, Storage, StorageHasher}; +use crate::ledger::storage::{self, Storage}; use crate::proto::Tx; use crate::types::address::Address; use crate::types::internal::HostEnvResult; diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 4403453dc9..a0d4bcd26b 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_tests" resolver = "2" -version = "0.12.0" +version = "0.12.1" [features] default = ["abciplus", "wasm-runtime"] @@ -43,7 +43,7 @@ rust_decimal = "1.26.1" rust_decimal_macros = "1.26.1" [dev-dependencies] -namada_apps = {path = "../apps", default-features = false, features = ["testing"]} +namada_apps = {path = "../apps", default-features = false, features = ["abciplus", "testing"]} assert_cmd = "1.0.7" borsh = "0.9.1" color-eyre = "0.5.11" diff --git a/tests/src/e2e/eth_bridge_tests.rs b/tests/src/e2e/eth_bridge_tests.rs index 7cc1bd6aee..43a2ca0f3e 100644 --- a/tests/src/e2e/eth_bridge_tests.rs +++ b/tests/src/e2e/eth_bridge_tests.rs @@ -1,7 +1,16 @@ +use color_eyre::eyre::Result; +use namada::ledger::eth_bridge::{ + Contracts, EthereumBridgeConfig, UpgradeableContract, +}; +use namada::types::address::wnam; +use namada::types::ethereum_events::EthAddress; +use namada_apps::config::ethereum_bridge; + +use super::setup::set_ethereum_bridge_mode; use crate::e2e::helpers::get_actor_rpc; use crate::e2e::setup; use crate::e2e::setup::constants::{ - wasm_abs_path, ALBERT, TX_WRITE_STORAGE_KEY_WASM, + wasm_abs_path, ALBERT, BERTHA, NAM, TX_WRITE_STORAGE_KEY_WASM, }; use crate::e2e::setup::{Bin, Who}; use crate::{run, run_as}; @@ -19,6 +28,8 @@ fn storage_key(path: &str) -> String { } #[test] +#[ignore] +// this test is outdated, so it is ignored fn everything() { const LEDGER_STARTUP_TIMEOUT_SECONDS: u64 = 30; const CLIENT_COMMAND_TIMEOUT_SECONDS: u64 = 30; @@ -92,3 +103,137 @@ fn everything() { namadac_tx.assert_success(); } } + +/// Tests that we can start the ledger with an endpoint for submitting Ethereum +/// events. This mode can be used in further end-to-end tests. 
+#[test] +fn run_ledger_with_ethereum_events_endpoint() -> Result<()> { + let test = setup::single_node_net()?; + + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::EventsEndpoint, + ); + + // Start the ledger as a validator + let mut ledger = + run_as!(test, Who::Validator(0), Bin::Node, vec!["ledger"], Some(40))?; + ledger.exp_string( + "Starting to listen for Borsh-serialized Ethereum events", + )?; + ledger.exp_string("Namada ledger node started")?; + + ledger.send_control('c')?; + ledger.exp_string( + "Stopping listening for Borsh-serialized Ethereum events", + )?; + + Ok(()) +} + +/// In this test, we check the following: +/// 1. We can successfully add transfers to the bridge pool. +/// 2. We can query the bridge pool and it is non-empty. +#[test] +fn test_add_to_bridge_pool() { + const LEDGER_STARTUP_TIMEOUT_SECONDS: u64 = 40; + const CLIENT_COMMAND_TIMEOUT_SECONDS: u64 = 60; + const QUERY_TIMEOUT_SECONDS: u64 = 40; + const SOLE_VALIDATOR: Who = Who::Validator(0); + const RECEIVER: &str = "0x6B175474E89094C55Da98b954EedeAC495271d0F"; + let wnam_address = wnam().to_canonical(); + let test = setup::network( + |mut genesis| { + genesis.ethereum_bridge_params = Some(EthereumBridgeConfig { + min_confirmations: Default::default(), + contracts: Contracts { + native_erc20: wnam(), + bridge: UpgradeableContract { + address: EthAddress([0; 20]), + version: Default::default(), + }, + governance: UpgradeableContract { + address: EthAddress([1; 20]), + version: Default::default(), + }, + }, + }); + genesis + }, + None, + ) + .unwrap(); + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::EventsEndpoint, + ); + + let mut namadan_ledger = run_as!( + test, + SOLE_VALIDATOR, + Bin::Node, + &["ledger"], + Some(LEDGER_STARTUP_TIMEOUT_SECONDS) + ) + .unwrap(); + namadan_ledger + .exp_string("Namada ledger node started") + .unwrap(); + namadan_ledger + .exp_string("Tendermint node started") + .unwrap(); + namadan_ledger.exp_string("Committed block hash").unwrap(); + let _bg_ledger = namadan_ledger.background(); + + let ledger_addr = get_actor_rpc(&test, &SOLE_VALIDATOR); + let tx_args = vec![ + "add-erc20-transfer", + "--address", + BERTHA, + "--signer", + BERTHA, + "--amount", + "100", + "--erc20", + &wnam_address, + "--ethereum-address", + RECEIVER, + "--fee-amount", + "10", + "--fee-payer", + BERTHA, + "--gas-amount", + "0", + "--gas-limit", + "0", + "--gas-token", + NAM, + "--ledger-address", + &ledger_addr, + ]; + + let mut namadac_tx = run!( + test, + Bin::Client, + tx_args, + Some(CLIENT_COMMAND_TIMEOUT_SECONDS) + ) + .unwrap(); + namadac_tx.exp_string("Transaction accepted").unwrap(); + namadac_tx.exp_string("Transaction applied").unwrap(); + namadac_tx.exp_string("Transaction is valid").unwrap(); + drop(namadac_tx); + + let mut namadar = run!( + test, + Bin::BridgePool, + ["query", "--ledger-address", &ledger_addr,], + Some(QUERY_TIMEOUT_SECONDS), + ) + .unwrap(); + namadar.exp_string(r#""bridge_pool_contents":"#).unwrap(); +} diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index 8151cdaaca..fb8e81d7ab 100644 --- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -67,13 +67,14 @@ use ibc_relayer::light_client::{LightClient, Verified}; use namada::core::ledger::ibc::actions::{commitment_prefix, port_channel_id}; use namada::ledger::ibc::storage::*; use namada::ledger::storage::ics23_specs::ibc_proof_specs; -use
namada::ledger::storage::Sha256Hasher; +use namada::ledger::storage::traits::Sha256Hasher; use namada::types::address::{Address, InternalAddress}; use namada::types::key::PublicKey; use namada::types::storage::{BlockHeight, Key, RESERVED_ADDRESS_PREFIX}; use namada::types::token::Amount; use namada_apps::client::rpc::query_storage_value_bytes; use namada_apps::client::utils::id_from_pk; +use namada_apps::config::ethereum_bridge; use setup::constants::*; use tendermint::block::Header as TmHeader; use tendermint::merkle::proof::Proof as TmProof; @@ -83,6 +84,7 @@ use tendermint_proto::Protobuf; use tendermint_rpc::{Client, HttpClient, Url}; use tokio::runtime::Runtime; +use super::setup::set_ethereum_bridge_mode; use crate::e2e::helpers::{find_address, get_actor_rpc, get_validator_pk}; use crate::e2e::setup::{self, sleep, Bin, NamadaCmd, Test, Who}; use crate::{run, run_as}; @@ -90,6 +92,18 @@ use crate::{run, run_as}; #[test] fn run_ledger_ibc() -> Result<()> { let (test_a, test_b) = setup::two_single_node_nets()?; + set_ethereum_bridge_mode( + &test_a, + &test_a.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); + set_ethereum_bridge_mode( + &test_b, + &test_b.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); // Run Chain A let mut ledger_a = diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index dc129818c3..e411c0f96a 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -22,6 +22,7 @@ use data_encoding::HEXLOWER; use namada::types::address::{btc, eth, masp_rewards, Address}; use namada::types::token; use namada_apps::client::tx::ShieldedContext; +use namada_apps::config::ethereum_bridge; use namada_apps::config::genesis::genesis_config::{ GenesisConfig, ParametersConfig, PosParamsConfig, }; @@ -29,7 +30,7 @@ use serde_json::json; use setup::constants::*; use super::helpers::{get_height, is_debug_mode, wait_for_block_height}; -use super::setup::get_all_wasms_hashes; +use super::setup::{get_all_wasms_hashes, set_ethereum_bridge_mode}; use crate::e2e::helpers::{ epoch_sleep, find_address, find_bonded_stake, get_actor_rpc, get_epoch, }; @@ -42,6 +43,14 @@ use crate::{run, run_as}; #[test] fn run_ledger() -> Result<()> { let test = setup::single_node_net()?; + + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); + let cmd_combinations = vec![vec!["ledger"], vec!["ledger", "run"]]; // Start the ledger as a validator @@ -76,20 +85,36 @@ fn test_node_connectivity_and_consensus() -> Result<()> { None, )?; + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(1), + ethereum_bridge::ledger::Mode::Off, + ); + // 1. 
Run 2 genesis validator ledger nodes and 1 non-validator node let args = ["ledger"]; let mut validator_0 = run_as!(test, Who::Validator(0), Bin::Node, args, Some(40))?; validator_0.exp_string("Namada ledger node started")?; validator_0.exp_string("This node is a validator")?; + validator_0.exp_string("Starting RPC HTTP server on")?; let mut validator_1 = run_as!(test, Who::Validator(1), Bin::Node, args, Some(40))?; validator_1.exp_string("Namada ledger node started")?; validator_1.exp_string("This node is a validator")?; + validator_1.exp_string("Starting RPC HTTP server on")?; let mut non_validator = run_as!(test, Who::NonValidator, Bin::Node, args, Some(40))?; non_validator.exp_string("Namada ledger node started")?; non_validator.exp_string("This node is not a validator")?; + non_validator.exp_string("Starting RPC HTTP server on")?; let bg_validator_0 = validator_0.background(); let bg_validator_1 = validator_1.background(); @@ -120,6 +145,7 @@ fn test_node_connectivity_and_consensus() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -174,11 +200,19 @@ fn test_node_connectivity_and_consensus() -> Result<()> { fn test_namada_shuts_down_if_tendermint_dies() -> Result<()> { let test = setup::single_node_net()?; + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); + // 1. Run the ledger node let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; ledger.exp_string("Namada ledger node started")?; + ledger.exp_string("Starting RPC HTTP server on")?; // 2. Kill the tendermint node sleep(1); @@ -210,6 +244,13 @@ fn test_namada_shuts_down_if_tendermint_dies() -> Result<()> { fn run_ledger_load_state_and_reset() -> Result<()> { let test = setup::single_node_net()?; + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); + // 1. Run the ledger node let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; @@ -277,11 +318,18 @@ fn run_ledger_load_state_and_reset() -> Result<()> { fn ledger_txs_and_queries() -> Result<()> { let test = setup::network(|genesis| genesis, None)?; + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); + // 1. Run the ledger node let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + ledger.exp_string("Starting RPC HTTP server on")?; let _bg_ledger = ledger.background(); let vp_user = wasm_abs_path(VP_USER_WASM); @@ -327,7 +375,7 @@ fn ledger_txs_and_queries() -> Result<()> { "0", "--gas-limit", "0", - "--fee-token", + "--gas-token", NAM, "--ledger-address", &validator_one_rpc, @@ -497,12 +545,19 @@ fn masp_txs_and_queries() -> Result<()> { }, None, )?; + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); // 1. 
Run the ledger node let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; ledger.exp_string("Namada ledger node started")?; + ledger.exp_string("Starting RPC HTTP server")?; let _bg_ledger = ledger.background(); @@ -763,12 +818,19 @@ fn masp_pinned_txs() -> Result<()> { }, None, )?; + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); // 1. Run the ledger node let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; ledger.exp_string("Namada ledger node started")?; + ledger.exp_string("Starting RPC HTTP server")?; let _bg_ledger = ledger.background(); @@ -926,12 +988,19 @@ fn masp_incentives() -> Result<()> { }, None, )?; + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); // 1. Run the ledger node let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; ledger.exp_string("Namada ledger node started")?; + ledger.exp_string("Starting RPC HTTP server")?; let _bg_ledger = ledger.background(); @@ -1626,12 +1695,18 @@ fn masp_incentives() -> Result<()> { fn invalid_transactions() -> Result<()> { let test = setup::single_node_net()?; + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); + // 1. Run the ledger node let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; ledger.exp_string("Namada ledger node started")?; - // Wait to commit a block - ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; + ledger.exp_string("Starting RPC HTTP server on")?; let bg_ledger = ledger.background(); @@ -1781,11 +1856,18 @@ fn pos_bonds() -> Result<()> { None, )?; + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); + // 1. 
Run the ledger node let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + ledger.exp_string("Starting RPC HTTP server on")?; let _bg_ledger = ledger.background(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -1808,6 +1890,7 @@ fn pos_bonds() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -1830,6 +1913,7 @@ fn pos_bonds() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -1851,6 +1935,7 @@ fn pos_bonds() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -1873,6 +1958,7 @@ fn pos_bonds() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -1914,6 +2000,7 @@ fn pos_bonds() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -1934,6 +2021,7 @@ fn pos_bonds() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -1974,11 +2062,18 @@ fn pos_init_validator() -> Result<()> { None, )?; + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); + // 1. 
Run the ledger node let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + ledger.exp_string("Starting RPC HTTP server on")?; let _bg_ledger = ledger.background(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -2007,6 +2102,7 @@ fn pos_init_validator() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -2032,6 +2128,7 @@ fn pos_init_validator() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); // Then self-bond the tokens: @@ -2053,6 +2150,7 @@ fn pos_init_validator() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -2077,6 +2175,7 @@ fn pos_init_validator() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -2097,6 +2196,7 @@ fn pos_init_validator() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -2140,11 +2240,18 @@ fn ledger_many_txs_in_a_block() -> Result<()> { Some("10s"), )?); + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); + // 1. 
Run the ledger node let mut ledger = run_as!(*test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + ledger.exp_string("Starting RPC HTTP server on")?; // Wait to commit a block ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; @@ -2226,6 +2333,7 @@ fn proposal_submission() -> Result<()> { |genesis| { let parameters = ParametersConfig { epochs_per_year: epochs_per_year_from_min_duration(1), + max_proposal_bytes: Default::default(), min_num_of_blocks: 1, max_expected_time_per_block: 1, vp_whitelist: Some(get_all_wasms_hashes( @@ -2249,6 +2357,13 @@ fn proposal_submission() -> Result<()> { None, )?; + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); + let namadac_help = vec!["--help"]; let mut client = run!(test, Bin::Client, namadac_help, Some(40))?; @@ -2259,7 +2374,7 @@ fn proposal_submission() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + ledger.exp_string("Starting RPC HTTP server on")?; let _bg_ledger = ledger.background(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -2283,6 +2398,7 @@ fn proposal_submission() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -2299,6 +2415,7 @@ fn proposal_submission() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, submit_proposal_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -2458,6 +2575,7 @@ fn proposal_submission() -> Result<()> { submit_proposal_vote, Some(15) )?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -2475,6 +2593,7 @@ fn proposal_submission() -> Result<()> { let mut client = run!(test, Bin::Client, submit_proposal_vote_delagator, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -2494,6 +2613,7 @@ fn proposal_submission() -> Result<()> { // this is valid because the client filter ALBERT delegation and there are // none let mut client = run!(test, Bin::Client, submit_proposal_vote, Some(15))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -2576,11 +2696,18 @@ fn proposal_submission() -> Result<()> { fn proposal_offline() -> Result<()> { let test = setup::network(|genesis| genesis, None)?; + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); + // 1. 
Run the ledger node let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(20))?; - ledger.exp_string("Namada ledger node started")?; + ledger.exp_string("Starting RPC HTTP server on")?; let _bg_ledger = ledger.background(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -2604,6 +2731,7 @@ fn proposal_offline() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -2975,6 +3103,8 @@ fn test_genesis_validators() -> Result<()> { // We have to update the ports in the configs again, because the ones from // `join-network` use the defaults + // + // TODO: use `update_actor_config` from `setup`, instead let update_config = |ix: u8, mut config: Config| { let first_port = net_address_port_0 + 6 * (ix as u16 + 1); config.ledger.tendermint.p2p_address.set_port(first_port); @@ -2984,6 +3114,8 @@ fn test_genesis_validators() -> Result<()> { .rpc_address .set_port(first_port + 1); config.ledger.shell.ledger_address.set_port(first_port + 2); + // disable eth full node + config.ledger.ethereum_bridge.mode = ethereum_bridge::ledger::Mode::Off; config }; @@ -3017,16 +3149,19 @@ fn test_genesis_validators() -> Result<()> { run_as!(test, Who::Validator(0), Bin::Node, args, Some(40))?; validator_0.exp_string("Namada ledger node started")?; validator_0.exp_string("This node is a validator")?; + validator_0.exp_string("Starting RPC HTTP server on")?; let mut validator_1 = run_as!(test, Who::Validator(1), Bin::Node, args, Some(40))?; validator_1.exp_string("Namada ledger node started")?; validator_1.exp_string("This node is a validator")?; + validator_1.exp_string("Starting RPC HTTP server on")?; let mut non_validator = run_as!(test, Who::NonValidator, Bin::Node, args, Some(40))?; non_validator.exp_string("Namada ledger node started")?; non_validator.exp_string("This node is not a validator")?; + non_validator.exp_string("Starting RPC HTTP server on")?; let bg_validator_0 = validator_0.background(); let bg_validator_1 = validator_1.background(); @@ -3055,6 +3190,7 @@ fn test_genesis_validators() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); @@ -3125,6 +3261,19 @@ fn double_signing_gets_slashed() -> Result<()> { None, )?; + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(0), + ethereum_bridge::ledger::Mode::Off, + ); + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + &Who::Validator(1), + ethereum_bridge::ledger::Mode::Off, + ); + // 1. Run 2 genesis validator ledger nodes let args = ["ledger"]; let mut validator_0 = diff --git a/tests/src/e2e/setup.rs b/tests/src/e2e/setup.rs index 15513f869a..7361563627 100644 --- a/tests/src/e2e/setup.rs +++ b/tests/src/e2e/setup.rs @@ -22,6 +22,7 @@ use itertools::{Either, Itertools}; use namada::types::chain::ChainId; use namada_apps::client::utils; use namada_apps::config::genesis::genesis_config::{self, GenesisConfig}; +use namada_apps::config::{ethereum_bridge, Config}; use namada_apps::{config, wallet}; use rand::Rng; use serde_json; @@ -66,6 +67,37 @@ pub fn default_port_offset(ix: u8) -> u16 { 6 * ix as u16 } +/// Update the config of some node `who`. 
+pub fn update_actor_config<F>( + test: &Test, + chain_id: &ChainId, + who: &Who, + update: F, +) where + F: FnOnce(&mut Config), +{ + let validator_base_dir = test.get_base_dir(who); + let mut validator_config = + Config::load(&validator_base_dir, chain_id, None); + update(&mut validator_config); + validator_config + .write(&validator_base_dir, chain_id, true) + .unwrap(); +} + +/// Configures the Ethereum bridge mode of `who`. This should be done before +/// `who` starts running. +pub fn set_ethereum_bridge_mode( + test: &Test, + chain_id: &ChainId, + who: &Who, + mode: ethereum_bridge::ledger::Mode, +) { + update_actor_config(test, chain_id, who, |config| { + config.ledger.ethereum_bridge.mode = mode; + }); +} + /// Set `num` validators to the genesis config. Note that called from inside /// the [`network`]'s first argument's closure, e.g. `set_validators(2, _)` will /// configure a network with 2 validators. @@ -183,6 +215,10 @@ pub fn network( println!("'init-network' output: {}", unread); let net = Network { chain_id }; + // release lock on wallet by dropping the + // child process + drop(init_network); + // Move the "others" accounts wallet in the main base dir, so that we can // use them with `Who::NonValidator` let chain_dir = test_dir.path().join(net.chain_id.as_str()); @@ -217,6 +253,7 @@ pub enum Bin { Node, Client, Wallet, + BridgePool, } #[derive(Debug)] @@ -671,10 +708,11 @@ where S: AsRef<OsStr>, { // Root cargo workspace manifest path - let bin_name = match bin { - Bin::Node => "namadan", - Bin::Client => "namadac", - Bin::Wallet => "namadaw", + let (bin_name, log_level) = match bin { + Bin::Node => ("namadan", "info"), + Bin::Client => ("namadac", "tendermint_rpc=debug"), + Bin::Wallet => ("namadaw", "info"), + Bin::BridgePool => ("namadar", "info"), }; + let mut run_cmd = generate_bin_command( @@ -683,7 +721,8 @@ where ); run_cmd - .env("NAMADA_LOG", "info") + .env("NAMADA_LOG", log_level) + .env("NAMADA_TM_STDOUT", "true") .env("TM_LOG_LEVEL", "info") .env("NAMADA_LOG_COLOR", "false") .current_dir(working_dir) diff --git a/tests/src/native_vp/eth_bridge_pool.rs b/tests/src/native_vp/eth_bridge_pool.rs new file mode 100644 index 0000000000..450b0460ff --- /dev/null +++ b/tests/src/native_vp/eth_bridge_pool.rs @@ -0,0 +1,185 @@ +#[cfg(test)] +mod test_bridge_pool_vp { + use std::path::PathBuf; + + use borsh::{BorshDeserialize, BorshSerialize}; + use namada::core::ledger::eth_bridge::storage::bridge_pool::BRIDGE_POOL_ADDRESS; + use namada::ledger::eth_bridge::{ + wrapped_erc20s, Contracts, EthereumBridgeConfig, UpgradeableContract, + ADDRESS, + }; + use namada::ledger::native_vp::ethereum_bridge::bridge_pool_vp::BridgePoolVp; + use namada::proto::Tx; + use namada::types::address::{nam, wnam}; + use namada::types::eth_bridge_pool::{ + GasFee, PendingTransfer, TransferToEthereum, + }; + use namada::types::ethereum_events::EthAddress; + use namada::types::key::{common, ed25519, SecretKey}; + use namada::types::token::Amount; + use namada_apps::wallet::defaults::{albert_address, bertha_address}; + use namada_apps::wasm_loader; + + use crate::native_vp::TestNativeVpEnv; + use crate::tx::{tx_host_env, TestTxEnv}; + + const ADD_TRANSFER_WASM: &str = "tx_bridge_pool.wasm"; + const ASSET: EthAddress = EthAddress([1; 20]); + const BERTHA_WEALTH: u64 = 1_000_000; + const BERTHA_TOKENS: u64 = 10_000; + const GAS_FEE: u64 = 100; + const TOKENS: u64 = 10; + + /// A signing keypair for good old Bertha.
+ fn bertha_keypair() -> common::SecretKey { + // generated from + // [`namada::types::key::ed25519::gen_keypair`] + let bytes = [ + 240, 3, 224, 69, 201, 148, 60, 53, 112, 79, 80, 107, 101, 127, 186, + 6, 176, 162, 113, 224, 62, 8, 183, 187, 124, 234, 244, 251, 92, 36, + 119, 243, + ]; + let ed_sk = ed25519::SecretKey::try_from_slice(&bytes).unwrap(); + ed_sk.try_to_sk().unwrap() + } + + /// Gets the absolute path to wasm directory + fn wasm_dir() -> PathBuf { + let mut current_path = std::env::current_dir() + .expect("Current directory should exist") + .canonicalize() + .expect("Current directory should exist"); + while current_path.file_name().unwrap() != "tests" { + current_path.pop(); + } + current_path.pop(); + current_path.join("wasm") + } + + /// Create necessary accounts and balances for the test. + fn setup_env(tx: Tx) -> TestTxEnv { + let mut env = TestTxEnv { + tx, + ..Default::default() + }; + let config = EthereumBridgeConfig { + min_confirmations: Default::default(), + contracts: Contracts { + native_erc20: wnam(), + bridge: UpgradeableContract { + address: EthAddress([42; 20]), + version: Default::default(), + }, + governance: UpgradeableContract { + address: EthAddress([18; 20]), + version: Default::default(), + }, + }, + }; + // initialize Ethereum bridge storage + config.init_storage(&mut env.storage); + // initialize Bertha's account + env.spawn_accounts([&albert_address(), &bertha_address(), &nam()]); + // enrich Albert + env.credit_tokens( + &albert_address(), + &nam(), + None, + BERTHA_WEALTH.into(), + ); + // enrich Bertha + env.credit_tokens( + &bertha_address(), + &nam(), + None, + BERTHA_WEALTH.into(), + ); + // Bertha has ERC20 tokens too. + let sub_prefix = wrapped_erc20s::sub_prefix(&ASSET); + env.credit_tokens( + &bertha_address(), + &ADDRESS, + Some(sub_prefix), + BERTHA_TOKENS.into(), + ); + env + } + + fn validate_tx(tx: Tx) { + let env = setup_env(tx); + tx_host_env::set(env); + let mut tx_env = tx_host_env::take(); + tx_env.execute_tx().expect("Test failed."); + let vp_env = TestNativeVpEnv::from_tx_env(tx_env, BRIDGE_POOL_ADDRESS); + let result = vp_env + .validate_tx(|ctx| BridgePoolVp { ctx }) + .expect("Test failed"); + assert!(result); + } + + #[test] + fn validate_erc20_tx() { + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: ASSET, + recipient: EthAddress([0; 20]), + sender: bertha_address(), + amount: Amount::from(TOKENS), + nonce: Default::default(), + }, + gas_fee: GasFee { + amount: Amount::from(GAS_FEE), + payer: bertha_address(), + }, + }; + let data = transfer.try_to_vec().expect("Test failed"); + let code = + wasm_loader::read_wasm_or_exit(wasm_dir(), ADD_TRANSFER_WASM); + let tx = Tx::new(code, Some(data)).sign(&bertha_keypair()); + validate_tx(tx); + } + + #[test] + fn validate_mint_wnam_tx() { + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: wnam(), + recipient: EthAddress([0; 20]), + sender: bertha_address(), + amount: Amount::from(TOKENS), + nonce: Default::default(), + }, + gas_fee: GasFee { + amount: Amount::from(GAS_FEE), + payer: bertha_address(), + }, + }; + let data = transfer.try_to_vec().expect("Test failed"); + let code = + wasm_loader::read_wasm_or_exit(wasm_dir(), ADD_TRANSFER_WASM); + let tx = Tx::new(code, Some(data)).sign(&bertha_keypair()); + validate_tx(tx); + } + + #[test] + fn validate_mint_wnam_different_sender_tx() { + let transfer = PendingTransfer { + transfer: TransferToEthereum { + asset: wnam(), + recipient: EthAddress([0; 20]), + sender: bertha_address(), 
+ amount: Amount::from(TOKENS), + nonce: Default::default(), + }, + gas_fee: GasFee { + amount: Amount::from(GAS_FEE), + payer: albert_address(), + }, + }; + let data = transfer.try_to_vec().expect("Test failed"); + let code = + wasm_loader::read_wasm_or_exit(wasm_dir(), ADD_TRANSFER_WASM); + let tx = Tx::new(code, Some(data)).sign(&bertha_keypair()); + validate_tx(tx); + } +} diff --git a/tests/src/native_vp/mod.rs b/tests/src/native_vp/mod.rs index 28dad14557..2f5ade76a8 100644 --- a/tests/src/native_vp/mod.rs +++ b/tests/src/native_vp/mod.rs @@ -1,10 +1,11 @@ +pub mod eth_bridge_pool; pub mod pos; use std::collections::BTreeSet; use namada::ledger::native_vp::{Ctx, NativeVp}; use namada::ledger::storage::mockdb::MockDB; -use namada::ledger::storage::Sha256Hasher; +use namada::ledger::storage::traits::Sha256Hasher; use namada::types::address::Address; use namada::types::storage; use namada::vm::WasmCacheRwAccess; diff --git a/tests/src/vm_host_env/ibc.rs b/tests/src/vm_host_env/ibc.rs index 88f2d205a9..197b275252 100644 --- a/tests/src/vm_host_env/ibc.rs +++ b/tests/src/vm_host_env/ibc.rs @@ -59,7 +59,7 @@ use namada::ledger::ibc::vp::{ }; use namada::ledger::native_vp::{Ctx, NativeVp}; use namada::ledger::storage::mockdb::MockDB; -use namada::ledger::storage::Sha256Hasher; +use namada::ledger::storage::traits::Sha256Hasher; use namada::ledger::tx_env::TxEnv; use namada::proto::Tx; use namada::tendermint_proto::Protobuf; diff --git a/tests/src/vm_host_env/vp.rs b/tests/src/vm_host_env/vp.rs index 50cbc7b6ef..0da4680a8e 100644 --- a/tests/src/vm_host_env/vp.rs +++ b/tests/src/vm_host_env/vp.rs @@ -100,7 +100,7 @@ mod native_vp_host_env { // TODO replace with `std::concat_idents` once stabilized (https://github.com/rust-lang/rust/issues/29599) use concat_idents::concat_idents; - use namada::ledger::storage::Sha256Hasher; + use namada::ledger::storage::traits::Sha256Hasher; use namada::vm::host_env::*; use namada::vm::WasmCacheRwAccess; diff --git a/tx_prelude/Cargo.toml b/tx_prelude/Cargo.toml index 945f14ad7b..ae0e303939 100644 --- a/tx_prelude/Cargo.toml +++ b/tx_prelude/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_tx_prelude" resolver = "2" -version = "0.12.0" +version = "0.12.1" [features] default = ["abciplus"] diff --git a/tx_prelude/src/lib.rs b/tx_prelude/src/lib.rs index d0d4511577..1eb71f720f 100644 --- a/tx_prelude/src/lib.rs +++ b/tx_prelude/src/lib.rs @@ -16,6 +16,7 @@ use core::slice; use std::marker::PhantomData; pub use borsh::{BorshDeserialize, BorshSerialize}; +pub use namada_core::ledger::eth_bridge; pub use namada_core::ledger::governance::storage as gov_storage; pub use namada_core::ledger::parameters::storage as parameters_storage; pub use namada_core::ledger::slash_fund::storage as slash_fund_storage; @@ -29,13 +30,14 @@ pub use namada_core::ledger::tx_env::TxEnv; pub use namada_core::proto::{Signed, SignedTxData}; pub use namada_core::types::address::Address; use namada_core::types::chain::CHAIN_ID_LENGTH; +pub use namada_core::types::ethereum_events::EthAddress; use namada_core::types::internal::HostEnvResult; use namada_core::types::storage::TxIndex; pub use namada_core::types::storage::{ self, BlockHash, BlockHeight, Epoch, BLOCK_HASH_LENGTH, }; use namada_core::types::time::Rfc3339String; -pub use namada_core::types::*; +pub use namada_core::types::{eth_bridge_pool, *}; pub use namada_macros::transaction; use namada_vm_env::tx::*; use namada_vm_env::{read_from_buffer, read_key_val_bytes_from_buffer}; diff --git 
a/tx_prelude/src/proof_of_stake.rs b/tx_prelude/src/proof_of_stake.rs index c611c6780e..43c899220b 100644 --- a/tx_prelude/src/proof_of_stake.rs +++ b/tx_prelude/src/proof_of_stake.rs @@ -8,6 +8,7 @@ use namada_proof_of_stake::storage::{ bond_key, params_key, total_deltas_key, unbond_key, validator_address_raw_hash_key, validator_commission_rate_key, validator_consensus_key_key, validator_deltas_key, + validator_eth_cold_key_key, validator_eth_hot_key_key, validator_max_commission_rate_change_key, validator_set_key, validator_slashes_key, validator_state_key, BondId, Bonds, TotalDeltas, Unbonds, ValidatorConsensusKeys, ValidatorDeltas, ValidatorSets, @@ -98,6 +99,8 @@ impl Ctx { InitValidator { account_key, consensus_key, + eth_cold_key, + eth_hot_key, protocol_key, dkg_key, commission_rate, @@ -114,10 +117,14 @@ impl Ctx { self.write(&protocol_pk_key, &protocol_key)?; let dkg_pk_key = key::dkg_session_keys::dkg_pk_key(&validator_address); self.write(&dkg_pk_key, &dkg_key)?; + let eth_cold_key = key::common::PublicKey::Secp256k1(eth_cold_key); + let eth_hot_key = key::common::PublicKey::Secp256k1(eth_hot_key); self.become_validator( + &validator_address, + &consensus_key, + &eth_cold_key, + &eth_hot_key, current_epoch, commission_rate, max_commission_rate_change, @@ -156,6 +163,22 @@ impl namada_proof_of_stake::PosActions for Ctx { self.write(&validator_consensus_key_key(key), &value) } + fn write_validator_eth_cold_key( + &mut self, + address: &Address, + value: types::ValidatorEthKey, + ) -> Result<(), storage_api::Error> { + self.write(&validator_eth_cold_key_key(address), &value) + } + + fn write_validator_eth_hot_key( + &mut self, + address: &Address, + value: types::ValidatorEthKey, + ) -> Result<(), storage_api::Error> { + self.write(&validator_eth_hot_key_key(address), &value) + } + fn write_validator_state( &mut self, key: &Address, diff --git a/vm_env/Cargo.toml b/vm_env/Cargo.toml index cf4e6a7d9f..df7f726863 100644 --- a/vm_env/Cargo.toml +++ b/vm_env/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_vm_env" resolver = "2" -version = "0.12.0" +version = "0.12.1" [features] default = ["abciplus"] diff --git a/vp_prelude/Cargo.toml b/vp_prelude/Cargo.toml index a0d76feadc..3489f62172 100644 --- a/vp_prelude/Cargo.toml +++ b/vp_prelude/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_vp_prelude" resolver = "2" -version = "0.12.0" +version = "0.12.1" [features] default = ["abciplus"] diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index bd82ea3be8..53207ce2c9 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -98,6 +98,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ark-ed-on-bls12-381" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43b7ada17db3854f5994e74e60b18e10e818594935ee7e1d329800c117b32970" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ff", + "ark-std", +] + [[package]] name = "ark-ff" version = "0.3.0" @@ -138,6 +150,19 @@ dependencies = [ "syn", ] +[[package]] +name = "ark-poly" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b0f78f47537c2f15706db7e98fe64cc1711dbf9def81218194e17239e53e5aa" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "derivative", + "hashbrown 0.11.2", +] + [[package]] name = "ark-serialize" version = "0.3.0" @@ -287,7 +312,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum =
"43473b34abc4b0b405efa0a250bac87eea888182b21687ee5c8115d279b0fda5" dependencies = [ - "bitvec", + "bitvec 0.22.3", "blake2s_simd 0.5.11", "byteorder", "crossbeam-channel 0.5.6", @@ -312,6 +337,15 @@ dependencies = [ "crunchy 0.1.6", ] +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bip0039" version = "0.9.0" @@ -374,10 +408,31 @@ version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5237f00a8c86130a0cc317830e558b966dd7850d48a953d998c813f01a41b527" dependencies = [ - "funty", - "radium", + "funty 1.2.0", + "radium 0.6.2", "tap", - "wyz", + "wyz 0.4.0", +] + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty 2.0.0", + "radium 0.7.0", + "tap", + "wyz 0.5.1", +] + +[[package]] +name = "blake2" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e" +dependencies = [ + "digest 0.10.5", ] [[package]] @@ -502,7 +557,7 @@ source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", - "proc-macro-crate", + "proc-macro-crate 0.1.5", "proc-macro2", "syn", ] @@ -533,6 +588,12 @@ version = "3.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +[[package]] +name = "byte-slice-cast" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" + [[package]] name = "bytecheck" version = "0.6.9" @@ -1221,6 +1282,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" dependencies = [ + "serde", "signature", ] @@ -1247,6 +1309,10 @@ checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ "curve25519-dalek", "ed25519", + "merlin", + "rand 0.7.3", + "serde", + "serde_bytes", "sha2 0.9.9", "zeroize", ] @@ -1334,6 +1400,50 @@ dependencies = [ "version_check", ] +[[package]] +name = "ethabi" +version = "17.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4966fba78396ff92db3b817ee71143eccd98acf0f876b8d600e585a670c5d1b" +dependencies = [ + "ethereum-types", + "hex", + "once_cell", + "regex", + "serde", + "serde_json", + "sha3 0.10.6", + "thiserror", + "uint", +] + +[[package]] +name = "ethbloom" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11da94e443c60508eb62cf256243a64da87304c2802ac2528847f79d750007ef" +dependencies = [ + "crunchy 0.2.2", + "fixed-hash", + "impl-rlp", + "impl-serde", + "tiny-keccak", +] + +[[package]] +name = "ethereum-types" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2827b94c556145446fcce834ca86b7abf0c39a805883fe20e72c5bfdb5a0dc6" +dependencies = [ + "ethbloom", + "fixed-hash", + "impl-rlp", + "impl-serde", + "primitive-types", + "uint", +] + [[package]] name = "eyre" version = "0.6.8" @@ -1359,10 
+1469,47 @@ dependencies = [ "instant", ] +[[package]] +name = "ferveo" +version = "0.1.1" +source = "git+https://github.com/anoma/ferveo?rev=9e5e91c954158e7cff45c483fd06cd649a81553f#9e5e91c954158e7cff45c483fd06cd649a81553f" +dependencies = [ + "anyhow", + "ark-bls12-381", + "ark-ec", + "ark-ed-on-bls12-381", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "bincode", + "blake2", + "blake2b_simd 1.0.0", + "borsh", + "digest 0.10.5", + "ed25519-dalek", + "either", + "ferveo-common", + "group-threshold-cryptography", + "hex", + "itertools", + "measure_time", + "miracl_core", + "num", + "rand 0.7.3", + "rand 0.8.5", + "serde", + "serde_bytes", + "serde_json", + "subproductdomain", + "subtle", + "zeroize", +] + [[package]] name = "ferveo-common" version = "0.1.0" -source = "git+https://github.com/anoma/ferveo#1022ab2c7ccc689abcc05e5a08df6fb0c2a3fc65" +source = "git+https://github.com/anoma/ferveo?rev=9e5e91c954158e7cff45c483fd06cd649a81553f#9e5e91c954158e7cff45c483fd06cd649a81553f" dependencies = [ "anyhow", "ark-ec", @@ -1378,11 +1525,23 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "131655483be284720a17d74ff97592b8e76576dc25563148601df2d7c9080924" dependencies = [ - "bitvec", + "bitvec 0.22.3", "rand_core 0.6.4", "subtle", ] +[[package]] +name = "fixed-hash" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + [[package]] name = "fixedbitset" version = "0.4.2" @@ -1434,6 +1593,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1847abb9cb65d566acd5942e94aea9c8f547ad02c98e1649326fc0e8910b8b1e" +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + [[package]] name = "futures" version = "0.3.25" @@ -1592,6 +1757,30 @@ dependencies = [ "subtle", ] +[[package]] +name = "group-threshold-cryptography" +version = "0.1.0" +source = "git+https://github.com/anoma/ferveo?rev=9e5e91c954158e7cff45c483fd06cd649a81553f#9e5e91c954158e7cff45c483fd06cd649a81553f" +dependencies = [ + "anyhow", + "ark-bls12-381", + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "blake2b_simd 1.0.0", + "chacha20", + "hex", + "itertools", + "miracl_core", + "rand 0.8.5", + "rand_core 0.6.4", + "rayon", + "subproductdomain", + "thiserror", +] + [[package]] name = "gumdrop" version = "0.8.1" @@ -2012,7 +2201,7 @@ dependencies = [ "prost", "ripemd160", "sha2 0.9.9", - "sha3", + "sha3 0.9.1", "sp-std", ] @@ -2032,6 +2221,44 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-rlp" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" +dependencies = [ + "rlp", +] + +[[package]] +name = "impl-serde" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" +dependencies = [ + "serde", +] + 
+[[package]] +name = "impl-trait-for-tuples" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "incrementalmerkletree" version = "0.2.0" @@ -2083,6 +2310,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if 1.0.0", + "js-sys", + "wasm-bindgen", + "web-sys", ] [[package]] @@ -2115,7 +2345,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7baec19d4e83f9145d4891178101a604565edff9645770fc979804138b04c" dependencies = [ - "bitvec", + "bitvec 0.22.3", "bls12_381", "ff", "group", @@ -2285,7 +2515,7 @@ source = "git+https://github.com/anoma/masp?rev=bee40fc465f6afbd10558d12fe96eb17 dependencies = [ "aes", "bip0039", - "bitvec", + "bitvec 0.22.3", "blake2b_simd 1.0.0", "blake2s_simd 1.0.0", "bls12_381", @@ -2345,6 +2575,16 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +[[package]] +name = "measure_time" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56220900f1a0923789ecd6bf25fbae8af3b2f1ff3e9e297fc9b6b8674dd4d852" +dependencies = [ + "instant", + "log", +] + [[package]] name = "memchr" version = "2.5.0" @@ -2393,6 +2633,18 @@ dependencies = [ "nonempty", ] +[[package]] +name = "merlin" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.5.1", + "zeroize", +] + [[package]] name = "mime" version = "0.3.16" @@ -2420,6 +2672,12 @@ dependencies = [ "windows-sys 0.42.0", ] +[[package]] +name = "miracl_core" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94c7128ba23c81f6471141b90f17654f89ef44a56e14b8a4dd0fddfccd655277" + [[package]] name = "moka" version = "0.8.6" @@ -2456,7 +2714,7 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "namada" -version = "0.12.0" +version = "0.12.1" dependencies = [ "async-trait", "bellman", @@ -2466,6 +2724,8 @@ dependencies = [ "clru", "data-encoding", "derivative", + "eyre", + "ferveo-common", "ibc", "ibc-proto", "itertools", @@ -2473,14 +2733,18 @@ dependencies = [ "masp_primitives", "masp_proofs", "namada_core", + "namada_ethereum_bridge", "namada_proof_of_stake", "parity-wasm", "paste", "proptest", "prost", "pwasm-utils", + "rand 0.8.5", + "rand_core 0.6.4", "rayon", "rust_decimal", + "serde", "serde_json", "sha2 0.9.9", "tempfile", @@ -2500,9 +2764,10 @@ dependencies = [ [[package]] name = "namada_core" -version = "0.12.0" +version = "0.12.1" dependencies = [ "ark-bls12-381", + "ark-ec", "ark-serialize", "bech32", "bellman", @@ -2511,7 +2776,11 @@ dependencies = [ "data-encoding", "derivative", "ed25519-consensus", + "ethabi", + "eyre", + "ferveo", "ferveo-common", + "group-threshold-cryptography", "ibc", "ibc-proto", "ics23", @@ -2519,6 +2788,7 @@ dependencies = [ "itertools", "libsecp256k1", "masp_primitives", + "num-rational", "proptest", "prost", "prost-types", @@ -2534,14 +2804,32 @@ dependencies = [ "tendermint", "tendermint-proto", "thiserror", + "tiny-keccak", 
"tonic-build", "tracing", "zeroize", ] +[[package]] +name = "namada_ethereum_bridge" +version = "0.11.0" +dependencies = [ + "borsh", + "eyre", + "itertools", + "namada_core", + "namada_proof_of_stake", + "serde", + "serde_json", + "tendermint", + "tendermint-proto", + "tendermint-rpc", + "tracing", +] + [[package]] name = "namada_macros" -version = "0.12.0" +version = "0.12.1" dependencies = [ "quote", "syn", @@ -2549,21 +2837,23 @@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "derivative", + "ferveo-common", "namada_core", "proptest", "rust_decimal", "rust_decimal_macros", + "tendermint-proto", "thiserror", "tracing", ] [[package]] name = "namada_tests" -version = "0.12.0" +version = "0.12.1" dependencies = [ "chrono", "concat-idents", @@ -2592,7 +2882,7 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "masp_primitives", @@ -2607,7 +2897,7 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "hex", @@ -2618,7 +2908,7 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "namada_core", @@ -2631,7 +2921,7 @@ dependencies = [ [[package]] name = "namada_wasm" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "getrandom 0.2.8", @@ -2664,6 +2954,20 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" +[[package]] +name = "num" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + [[package]] name = "num-bigint" version = "0.4.3" @@ -2676,6 +2980,15 @@ dependencies = [ "serde", ] +[[package]] +name = "num-complex" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19" +dependencies = [ + "num-traits", +] + [[package]] name = "num-derive" version = "0.3.3" @@ -2697,6 +3010,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-rational" version = "0.4.1" @@ -2777,7 +3101,7 @@ dependencies = [ "aes", "arrayvec 0.7.2", "bigint", - "bitvec", + "bitvec 0.22.3", "blake2b_simd 1.0.0", "ff", "fpe", @@ -2804,6 +3128,32 @@ dependencies = [ "group", ] +[[package]] +name = "parity-scale-codec" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "366e44391a8af4cfd6002ef6ba072bae071a96aafca98d7d448a34c5dca38b6a" +dependencies = [ + "arrayvec 0.7.2", + "bitvec 1.0.1", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" +dependencies = [ + "proc-macro-crate 1.2.1", + "proc-macro2", + "quote", + 
"syn", +] + [[package]] name = "parity-wasm" version = "0.45.0" @@ -2997,6 +3347,19 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "primitive-types" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-rlp", + "impl-serde", + "uint", +] + [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -3006,6 +3369,17 @@ dependencies = [ "toml", ] +[[package]] +name = "proc-macro-crate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" +dependencies = [ + "once_cell", + "thiserror", + "toml", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -3195,6 +3569,12 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + [[package]] name = "rand" version = "0.7.3" @@ -3481,6 +3861,16 @@ dependencies = [ "syn", ] +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rustc-hex", +] + [[package]] name = "rust_decimal" version = "1.26.1" @@ -3515,6 +3905,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + [[package]] name = "rustc_version" version = "0.3.3" @@ -3879,6 +4275,16 @@ dependencies = [ "opaque-debug", ] +[[package]] +name = "sha3" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" +dependencies = [ + "digest 0.10.5", + "keccak", +] + [[package]] name = "sharded-slab" version = "0.1.4" @@ -3998,6 +4404,19 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "subproductdomain" +version = "0.1.0" +source = "git+https://github.com/anoma/ferveo?rev=9e5e91c954158e7cff45c483fd06cd649a81553f#9e5e91c954158e7cff45c483fd06cd649a81553f" +dependencies = [ + "anyhow", + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", +] + [[package]] name = "subtle" version = "2.4.1" @@ -4601,7 +5020,7 @@ dependencies = [ [[package]] name = "tx_template" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "getrandom 0.2.8", @@ -4738,7 +5157,7 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vp_template" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "getrandom 0.2.8", @@ -5307,6 +5726,15 @@ dependencies = [ "tap", ] +[[package]] +name = "wyz" +version = "0.5.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + [[package]] name = "zcash_encoding" version = "0.0.0" @@ -5346,7 +5774,7 @@ source = "git+https://github.com/zcash/librustzcash/?rev=2425a08#2425a0869098e3b dependencies = [ "aes", "bip0039", - "bitvec", + "bitvec 0.22.3", "blake2b_simd 1.0.0", "blake2s_simd 1.0.0", "bls12_381", diff --git a/wasm/checksums.json b/wasm/checksums.json index b13508300c..ce1a992538 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,20 +1,21 @@ { - "tx_bond.wasm": "tx_bond.be9c75f96b3b4880b7934d42ee218582b6304f6326a4588d1e6ac1ea4cc61c49.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.cd861e0e82f4934be6d8382d6fff98286b4fadbc20ab826b9e817f6666021273.wasm", - "tx_ibc.wasm": "tx_ibc.13daeb0c88abba264d3052129eda0713bcf1a71f6f69bf37ec2494d0d9119f1f.wasm", - "tx_init_account.wasm": "tx_init_account.e21cfd7e96802f8e841613fb89f1571451401d002a159c5e9586855ac1374df5.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.b9a77bc9e416f33f1e715f25696ae41582e1b379422f7a643549884e0c73e9de.wasm", - "tx_init_validator.wasm": "tx_init_validator.1e9732873861c625f239e74245f8c504a57359c06614ba40387a71811ca4a097.wasm", - "tx_reveal_pk.wasm": "tx_reveal_pk.47bc922a8be5571620a647ae442a1af7d03d05d29bef95f0b32cdfe00b11fee9.wasm", - "tx_transfer.wasm": "tx_transfer.bbd1ef5d9461c78f0288986de046baad77e10671addc5edaf3c68ea1ae4ecc99.wasm", - "tx_unbond.wasm": "tx_unbond.c0a690d0ad43a94294a6405bae3327f638a657446c74dc61dbb3a4d2ce488b5e.wasm", - "tx_update_vp.wasm": "tx_update_vp.ee2e9b882c4accadf4626e87d801c9ac8ea8c61ccea677e0532fc6c1ee7db6a2.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.263fd9f4cb40f283756f394d86bdea3417e9ecd0568d6582c07a5b6bd14287d6.wasm", - "tx_withdraw.wasm": "tx_withdraw.6ce8faf6a32340178ddeaeb91a9b40e7f0433334e5c1f357964bf8e11d0077f1.wasm", - "vp_implicit.wasm": "vp_implicit.17f5c2af947ccfadce22d0fffecde1a1b4bc4ca3acd5dd8b459c3dce4afcb4e8.wasm", - "vp_masp.wasm": "vp_masp.5620cb6e555161641337d308851c760fbab4f9d3693cfd378703aa55e285249d.wasm", - "vp_testnet_faucet.wasm": "vp_testnet_faucet.362584b063cc4aaf8b72af0ed8af8d05a179ebefec596b6ab65e0ca255ec3c80.wasm", - "vp_token.wasm": "vp_token.a289723dd182fe0206e6c4cf1f426a6100787b20e2653d2fad6031e8106157f3.wasm", - "vp_user.wasm": "vp_user.b83b2d0616bb2244c8a92021665a0be749282a53fe1c493e98c330a6ed983833.wasm", - "vp_validator.wasm": "vp_validator.59e3e7729e14eeacc17d76b736d1760d59a1a6e9d6acbc9a870e1835438f524a.wasm" + "tx_bond.wasm": "tx_bond.aff95592a6394c8b766ebba48131d762b23bf86c3423279836749f64cd6173d8.wasm", + "tx_bridge_pool.wasm": "tx_bridge_pool.1e5d9b7d20e9b014f76df5995fadcc1bb5bcceff2a4d2282e0e2adab3702d559.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.b34543d08b1aa73ddbbfbeaac08b8160c1738fe1a1601e6d5961ab827e3d37e0.wasm", + "tx_ibc.wasm": "tx_ibc.3700f2036627ca242b6c9139f2b3fb356487a33832e3247ace8c084733431fad.wasm", + "tx_init_account.wasm": "tx_init_account.719a06117fbe799507ada30d93f0fc726745cb38cd91fc0a5528889518fa9c67.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.657ca43087e12159032a1ed6b75a8cc441c68ce64775dffd143be4ea8280872f.wasm", + "tx_init_validator.wasm": "tx_init_validator.3f6d393c25d2c27dd828797821797b08d4bd18d3a2def95e3dd080e1fc2fcec6.wasm", + "tx_reveal_pk.wasm": "tx_reveal_pk.d870fceccbb0d6c8f6ce3513b1ef1e7cafe3c480781ccfbdb48992b469dfe5d7.wasm", + "tx_transfer.wasm": 
"tx_transfer.95f07ac2f3b87d75be7b05f2fc1399c7da998ee70718e7f6ab875c508705b84e.wasm", + "tx_unbond.wasm": "tx_unbond.1586c43aba2d86f1e0376d1d9850cf46f79a90fb4b2d5db0dd8f0661d88cba3c.wasm", + "tx_update_vp.wasm": "tx_update_vp.5e9abf7b7577aebf790b9a1c68568adcfdc8e42361949aae00e67803bf444eb2.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.a473f41c6024f6f9e08ebb98de918b4534360d2102c18fd478c718ab3c15352f.wasm", + "tx_withdraw.wasm": "tx_withdraw.772e3cbdfde38aaead0a07393a6a4c0b15c86aed38fb9aebb4f5efb53b5e2a71.wasm", + "vp_implicit.wasm": "vp_implicit.1440282d60cb76c659828de6f0086a955e5400c1f5b44cda9e00fbce352bda3c.wasm", + "vp_masp.wasm": "vp_masp.9ab4a7a574e0cfe56f3da89fdb430235b7922f5d4d8f60ca99933c9e19646449.wasm", + "vp_testnet_faucet.wasm": "vp_testnet_faucet.fb43fdfec782778da81eeb326677f0f079cb108444a5f7c3698a77e643363ce0.wasm", + "vp_token.wasm": "vp_token.b941d69d7b0c6de31d9e3fbb72b708e0b4d480112d318e1966a05e8af0d1c5bf.wasm", + "vp_user.wasm": "vp_user.4d1c16c065506f3ba9c0086af82740e89921e1a89c43fde79a1132201b3d6917.wasm", + "vp_validator.wasm": "vp_validator.a31e752aab8f0e56a99d40c58a0fb2b09c5d025d41f8a8c649e8751b0be0dc81.wasm" } \ No newline at end of file diff --git a/wasm/tx_template/Cargo.toml b/wasm/tx_template/Cargo.toml index 345ec86237..708225ef63 100644 --- a/wasm/tx_template/Cargo.toml +++ b/wasm/tx_template/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "tx_template" resolver = "2" -version = "0.12.0" +version = "0.12.1" [lib] crate-type = ["cdylib"] diff --git a/wasm/vp_template/Cargo.toml b/wasm/vp_template/Cargo.toml index 07ae7758db..31cb9374b0 100644 --- a/wasm/vp_template/Cargo.toml +++ b/wasm/vp_template/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "vp_template" resolver = "2" -version = "0.12.0" +version = "0.12.1" [lib] crate-type = ["cdylib"] diff --git a/wasm/wasm_source/Cargo.toml b/wasm/wasm_source/Cargo.toml index 36731a505b..d8e798cff0 100644 --- a/wasm/wasm_source/Cargo.toml +++ b/wasm/wasm_source/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_wasm" resolver = "2" -version = "0.12.0" +version = "0.12.1" [lib] crate-type = ["cdylib"] @@ -13,6 +13,7 @@ crate-type = ["cdylib"] # Newly added wasms should also be added into the Makefile `$(wasms)` list. [features] tx_bond = ["namada_tx_prelude"] +tx_bridge_pool = ["namada_tx_prelude"] tx_from_intent = ["namada_tx_prelude"] tx_ibc = ["namada_tx_prelude"] tx_init_account = ["namada_tx_prelude"] diff --git a/wasm/wasm_source/Makefile b/wasm/wasm_source/Makefile index aee4f3df8f..d54735bbd3 100644 --- a/wasm/wasm_source/Makefile +++ b/wasm/wasm_source/Makefile @@ -6,6 +6,7 @@ nightly := $(shell cat ../../rust-nightly-version) # All the wasms that can be built from this source, switched via Cargo features # Wasms can be added via the Cargo.toml `[features]` list. 
wasms := tx_bond +wasms += tx_bridge_pool wasms += tx_ibc wasms += tx_init_account wasms += tx_init_validator diff --git a/wasm/wasm_source/proptest-regressions/tx_bond.txt b/wasm/wasm_source/proptest-regressions/tx_bond.txt index 3a88756618..8c589d1abd 100644 --- a/wasm/wasm_source/proptest-regressions/tx_bond.txt +++ b/wasm/wasm_source/proptest-regressions/tx_bond.txt @@ -1 +1 @@ -cc e54347c5114ef29538127ba9ad68d1572af839ec63c015318fc0827818853a22 +cc f22e874350910b197cb02a4a07ec5bef18e16c0d1a39eaabaee43d1fc05ce11d diff --git a/wasm/wasm_source/src/lib.rs b/wasm/wasm_source/src/lib.rs index 98704112f2..ea9ca6ea99 100644 --- a/wasm/wasm_source/src/lib.rs +++ b/wasm/wasm_source/src/lib.rs @@ -1,5 +1,7 @@ #[cfg(feature = "tx_bond")] pub mod tx_bond; +#[cfg(feature = "tx_bridge_pool")] +pub mod tx_bridge_pool; #[cfg(feature = "tx_change_validator_commission")] pub mod tx_change_validator_commission; #[cfg(feature = "tx_ibc")] diff --git a/wasm/wasm_source/src/tx_bond.rs b/wasm/wasm_source/src/tx_bond.rs index 219a612630..76e4bb80c9 100644 --- a/wasm/wasm_source/src/tx_bond.rs +++ b/wasm/wasm_source/src/tx_bond.rs @@ -65,6 +65,8 @@ mod tests { let is_delegation = matches!( &bond.source, Some(source) if *source != bond.validator); let consensus_key = key::testing::keypair_1().ref_to(); + let eth_cold_key = key::testing::keypair_3().ref_to(); + let eth_hot_key = key::testing::keypair_4().ref_to(); let commission_rate = rust_decimal::Decimal::new(5, 2); let max_commission_rate_change = rust_decimal::Decimal::new(1, 2); @@ -72,6 +74,8 @@ mod tests { address: bond.validator.clone(), tokens: initial_stake, consensus_key, + eth_cold_key, + eth_hot_key, commission_rate, max_commission_rate_change, }]; diff --git a/wasm/wasm_source/src/tx_bridge_pool.rs b/wasm/wasm_source/src/tx_bridge_pool.rs new file mode 100644 index 0000000000..e246bdb930 --- /dev/null +++ b/wasm/wasm_source/src/tx_bridge_pool.rs @@ -0,0 +1,78 @@ +//! A tx for adding a transfer request across the Ethereum bridge +//! into the bridge pool. +use borsh::{BorshDeserialize, BorshSerialize}; +use eth_bridge::storage::{bridge_pool, native_erc20_key, wrapped_erc20s}; +use eth_bridge_pool::{GasFee, PendingTransfer, TransferToEthereum}; +use namada_tx_prelude::*; + +#[transaction] +fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult { + let signed = SignedTxData::try_from_slice(&tx_data[..]) + .map_err(|e| Error::wrap("Error deserializing SignedTxData", e))?; + let transfer = PendingTransfer::try_from_slice(&signed.data.unwrap()[..]) + .map_err(|e| { + Error::wrap("Error deserializing PendingTransfer", e) + })?; + log_string("Received transfer to add to pool."); + // pay the gas fees + let GasFee { amount, ref payer } = transfer.gas_fee; + token::transfer( + ctx, + payer, + &bridge_pool::BRIDGE_POOL_ADDRESS, + &address::nam(), + None, + amount, + &None, + &None, + )?; + log_string("Token transfer succeeded."); + let TransferToEthereum { + asset, + ref sender, + amount, + .. + } = transfer.transfer; + // if minting wNam, escrow the correct amount + if asset == native_erc20_address(ctx)? { + token::transfer( + ctx, + sender, + &eth_bridge::ADDRESS, + &address::nam(), + None, + amount, + &None, + &None, + )?; + } else { + // Otherwise we escrow ERC20 tokens. + let sub_prefix = wrapped_erc20s::sub_prefix(&asset); + token::transfer( + ctx, + sender, + &bridge_pool::BRIDGE_POOL_ADDRESS, + &eth_bridge::ADDRESS, + Some(sub_prefix), + amount, + &None, + &None, + )?; + } + log_string("Escrow succeeded"); + // add transfer into the pool + let pending_key = bridge_pool::get_pending_key(&transfer); + ctx.write_bytes(&pending_key, transfer.try_to_vec().unwrap()) + .wrap_err("Could not write transfer to bridge pool")?; + Ok(()) +}
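For reference, the `tx_data` that `apply_tx` expects is a Borsh-encoded `PendingTransfer` wrapped in `SignedTxData`; the bridge pool VP tests added earlier in this diff assemble one roughly as follows (a sketch reusing those tests' fixtures, so the addresses and amounts here are just illustrative test values):

```rust
// Sketch, mirroring `validate_erc20_tx` in tests/src/native_vp/eth_bridge_pool.rs:
// build a `PendingTransfer`, Borsh-serialize it, and sign the tx so the escrow
// debits from the sender's accounts can be authorized.
let transfer = PendingTransfer {
    transfer: TransferToEthereum {
        asset: EthAddress([1; 20]),     // ERC20 contract paid out on Ethereum
        recipient: EthAddress([0; 20]), // Ethereum address receiving the assets
        sender: bertha_address(),       // Namada account whose tokens are escrowed
        amount: Amount::from(10),
        nonce: Default::default(),
    },
    gas_fee: GasFee {
        amount: Amount::from(100), // NAM escrowed into the bridge pool as the fee
        payer: bertha_address(),
    },
};
let data = transfer.try_to_vec().expect("Borsh serialization failed");
let code = wasm_loader::read_wasm_or_exit(wasm_dir(), "tx_bridge_pool.wasm");
let tx = Tx::new(code, Some(data)).sign(&bertha_keypair());
```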
+ +fn native_erc20_address(ctx: &mut Ctx) -> EnvResult<EthAddress> { + log_string("Trying to get wnam key"); + let addr = ctx + .read_bytes(&native_erc20_key()) + .map_err(|e| Error::wrap("Could not read wNam key from storage", e))? + .unwrap(); + log_string("Got wnam key"); + Ok(BorshDeserialize::try_from_slice(addr.as_slice()).unwrap()) +} diff --git a/wasm/wasm_source/src/tx_change_validator_commission.rs b/wasm/wasm_source/src/tx_change_validator_commission.rs index f32d83f34e..d390d0189f 100644 --- a/wasm/wasm_source/src/tx_change_validator_commission.rs +++ b/wasm/wasm_source/src/tx_change_validator_commission.rs @@ -65,10 +65,14 @@ mod tests { pos_params: PosParams, ) -> TxResult { let consensus_key = key::testing::keypair_1().ref_to(); + let eth_cold_key = key::testing::keypair_3().ref_to(); + let eth_hot_key = key::testing::keypair_4().ref_to(); let genesis_validators = [GenesisValidator { address: commission_change.validator.clone(), tokens: token::Amount::from(1_000_000), consensus_key, + eth_cold_key, + eth_hot_key, commission_rate: initial_rate, max_commission_rate_change: max_change, }]; diff --git a/wasm/wasm_source/src/tx_unbond.rs b/wasm/wasm_source/src/tx_unbond.rs index 70033286bf..deee724f50 100644 --- a/wasm/wasm_source/src/tx_unbond.rs +++ b/wasm/wasm_source/src/tx_unbond.rs @@ -63,6 +63,8 @@ mod tests { let is_delegation = matches!( &unbond.source, Some(source) if *source != unbond.validator); let consensus_key = key::testing::keypair_1().ref_to(); + let eth_cold_key = key::testing::keypair_3().ref_to(); + let eth_hot_key = key::testing::keypair_4().ref_to(); let commission_rate = rust_decimal::Decimal::new(5, 2); let max_commission_rate_change = rust_decimal::Decimal::new(1, 2); @@ -76,6 +78,8 @@ mod tests { initial_stake }, consensus_key, + eth_cold_key, + eth_hot_key, commission_rate, max_commission_rate_change, }]; diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/wasm_source/src/tx_withdraw.rs index e29415f800..d331db1f36 100644 --- a/wasm/wasm_source/src/tx_withdraw.rs +++ b/wasm/wasm_source/src/tx_withdraw.rs @@ -71,6 +71,8 @@ mod tests { let is_delegation = matches!( &withdraw.source, Some(source) if *source != withdraw.validator); let consensus_key = key::testing::keypair_1().ref_to(); + let eth_cold_key = key::testing::keypair_3().ref_to(); + let eth_hot_key = key::testing::keypair_4().ref_to(); let commission_rate = rust_decimal::Decimal::new(5, 2); let max_commission_rate_change = rust_decimal::Decimal::new(1, 2); @@ -85,6 +87,8 @@ mod tests { initial_stake }, consensus_key, + eth_cold_key, + eth_hot_key, commission_rate, max_commission_rate_change, }]; diff --git a/wasm_for_tests/tx_memory_limit.wasm b/wasm_for_tests/tx_memory_limit.wasm index 9c4ea2abcc..07ad663d1e 100755 Binary files a/wasm_for_tests/tx_memory_limit.wasm and b/wasm_for_tests/tx_memory_limit.wasm differ diff --git a/wasm_for_tests/tx_mint_tokens.wasm b/wasm_for_tests/tx_mint_tokens.wasm index 72e1e8f075..c5bdd46d33 100755 Binary files a/wasm_for_tests/tx_mint_tokens.wasm and b/wasm_for_tests/tx_mint_tokens.wasm differ diff --git
a/wasm_for_tests/tx_no_op.wasm b/wasm_for_tests/tx_no_op.wasm index 105a68cd1b..3e0b1ef997 100755 Binary files a/wasm_for_tests/tx_no_op.wasm and b/wasm_for_tests/tx_no_op.wasm differ diff --git a/wasm_for_tests/tx_proposal_code.wasm b/wasm_for_tests/tx_proposal_code.wasm index 584ebfdcb1..a8e7a81527 100755 Binary files a/wasm_for_tests/tx_proposal_code.wasm and b/wasm_for_tests/tx_proposal_code.wasm differ diff --git a/wasm_for_tests/tx_read_storage_key.wasm b/wasm_for_tests/tx_read_storage_key.wasm index 8f62c8211e..1b5b368833 100755 Binary files a/wasm_for_tests/tx_read_storage_key.wasm and b/wasm_for_tests/tx_read_storage_key.wasm differ diff --git a/wasm_for_tests/tx_write_storage_key.wasm b/wasm_for_tests/tx_write_storage_key.wasm index 2831315a06..471bceb93b 100755 Binary files a/wasm_for_tests/tx_write_storage_key.wasm and b/wasm_for_tests/tx_write_storage_key.wasm differ diff --git a/wasm_for_tests/vp_always_false.wasm b/wasm_for_tests/vp_always_false.wasm index db0729c87c..f5d1881c27 100755 Binary files a/wasm_for_tests/vp_always_false.wasm and b/wasm_for_tests/vp_always_false.wasm differ diff --git a/wasm_for_tests/vp_always_true.wasm b/wasm_for_tests/vp_always_true.wasm index e363102995..341d0d9c45 100755 Binary files a/wasm_for_tests/vp_always_true.wasm and b/wasm_for_tests/vp_always_true.wasm differ diff --git a/wasm_for_tests/vp_eval.wasm b/wasm_for_tests/vp_eval.wasm index 8711cfca76..4aec253164 100755 Binary files a/wasm_for_tests/vp_eval.wasm and b/wasm_for_tests/vp_eval.wasm differ diff --git a/wasm_for_tests/vp_memory_limit.wasm b/wasm_for_tests/vp_memory_limit.wasm index d7d10f8a36..0cbbe23761 100755 Binary files a/wasm_for_tests/vp_memory_limit.wasm and b/wasm_for_tests/vp_memory_limit.wasm differ diff --git a/wasm_for_tests/vp_read_storage_key.wasm b/wasm_for_tests/vp_read_storage_key.wasm index 9bf70703e9..247eb8a4a4 100755 Binary files a/wasm_for_tests/vp_read_storage_key.wasm and b/wasm_for_tests/vp_read_storage_key.wasm differ diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 1abb89a73c..22e56dd4f3 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -98,6 +98,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ark-ed-on-bls12-381" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43b7ada17db3854f5994e74e60b18e10e818594935ee7e1d329800c117b32970" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ff", + "ark-std", +] + [[package]] name = "ark-ff" version = "0.3.0" @@ -138,6 +150,19 @@ dependencies = [ "syn", ] +[[package]] +name = "ark-poly" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b0f78f47537c2f15706db7e98fe64cc1711dbf9def81218194e17239e53e5aa" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "derivative", + "hashbrown 0.11.2", +] + [[package]] name = "ark-serialize" version = "0.3.0" @@ -287,7 +312,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43473b34abc4b0b405efa0a250bac87eea888182b21687ee5c8115d279b0fda5" dependencies = [ - "bitvec", + "bitvec 0.22.3", "blake2s_simd 0.5.11", "byteorder", "crossbeam-channel 0.5.6", @@ -312,6 +337,15 @@ dependencies = [ "crunchy 0.1.6", ] +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + 
"serde", +] + [[package]] name = "bip0039" version = "0.9.0" @@ -374,10 +408,31 @@ version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5237f00a8c86130a0cc317830e558b966dd7850d48a953d998c813f01a41b527" dependencies = [ - "funty", - "radium", + "funty 1.2.0", + "radium 0.6.2", "tap", - "wyz", + "wyz 0.4.0", +] + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty 2.0.0", + "radium 0.7.0", + "tap", + "wyz 0.5.1", +] + +[[package]] +name = "blake2" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e" +dependencies = [ + "digest 0.10.5", ] [[package]] @@ -502,7 +557,7 @@ source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", - "proc-macro-crate", + "proc-macro-crate 0.1.5", "proc-macro2", "syn", ] @@ -533,6 +588,12 @@ version = "3.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +[[package]] +name = "byte-slice-cast" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" + [[package]] name = "bytecheck" version = "0.6.9" @@ -1221,6 +1282,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" dependencies = [ + "serde", "signature", ] @@ -1247,6 +1309,10 @@ checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ "curve25519-dalek", "ed25519", + "merlin", + "rand 0.7.3", + "serde", + "serde_bytes", "sha2 0.9.9", "zeroize", ] @@ -1334,6 +1400,50 @@ dependencies = [ "version_check", ] +[[package]] +name = "ethabi" +version = "17.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4966fba78396ff92db3b817ee71143eccd98acf0f876b8d600e585a670c5d1b" +dependencies = [ + "ethereum-types", + "hex", + "once_cell", + "regex", + "serde", + "serde_json", + "sha3 0.10.6", + "thiserror", + "uint", +] + +[[package]] +name = "ethbloom" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11da94e443c60508eb62cf256243a64da87304c2802ac2528847f79d750007ef" +dependencies = [ + "crunchy 0.2.2", + "fixed-hash", + "impl-rlp", + "impl-serde", + "tiny-keccak", +] + +[[package]] +name = "ethereum-types" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2827b94c556145446fcce834ca86b7abf0c39a805883fe20e72c5bfdb5a0dc6" +dependencies = [ + "ethbloom", + "fixed-hash", + "impl-rlp", + "impl-serde", + "primitive-types", + "uint", +] + [[package]] name = "eyre" version = "0.6.8" @@ -1359,10 +1469,47 @@ dependencies = [ "instant", ] +[[package]] +name = "ferveo" +version = "0.1.1" +source = "git+https://github.com/anoma/ferveo?rev=9e5e91c954158e7cff45c483fd06cd649a81553f#9e5e91c954158e7cff45c483fd06cd649a81553f" +dependencies = [ + "anyhow", + "ark-bls12-381", + "ark-ec", + "ark-ed-on-bls12-381", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "bincode", + "blake2", + "blake2b_simd 1.0.0", + "borsh", + "digest 0.10.5", + 
"ed25519-dalek", + "either", + "ferveo-common", + "group-threshold-cryptography", + "hex", + "itertools", + "measure_time", + "miracl_core", + "num", + "rand 0.7.3", + "rand 0.8.5", + "serde", + "serde_bytes", + "serde_json", + "subproductdomain", + "subtle", + "zeroize", +] + [[package]] name = "ferveo-common" version = "0.1.0" -source = "git+https://github.com/anoma/ferveo#1022ab2c7ccc689abcc05e5a08df6fb0c2a3fc65" +source = "git+https://github.com/anoma/ferveo?rev=9e5e91c954158e7cff45c483fd06cd649a81553f#9e5e91c954158e7cff45c483fd06cd649a81553f" dependencies = [ "anyhow", "ark-ec", @@ -1378,11 +1525,23 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "131655483be284720a17d74ff97592b8e76576dc25563148601df2d7c9080924" dependencies = [ - "bitvec", + "bitvec 0.22.3", "rand_core 0.6.4", "subtle", ] +[[package]] +name = "fixed-hash" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + [[package]] name = "fixedbitset" version = "0.4.2" @@ -1434,6 +1593,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1847abb9cb65d566acd5942e94aea9c8f547ad02c98e1649326fc0e8910b8b1e" +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + [[package]] name = "futures" version = "0.3.25" @@ -1592,6 +1757,30 @@ dependencies = [ "subtle", ] +[[package]] +name = "group-threshold-cryptography" +version = "0.1.0" +source = "git+https://github.com/anoma/ferveo?rev=9e5e91c954158e7cff45c483fd06cd649a81553f#9e5e91c954158e7cff45c483fd06cd649a81553f" +dependencies = [ + "anyhow", + "ark-bls12-381", + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "blake2b_simd 1.0.0", + "chacha20", + "hex", + "itertools", + "miracl_core", + "rand 0.8.5", + "rand_core 0.6.4", + "rayon", + "subproductdomain", + "thiserror", +] + [[package]] name = "gumdrop" version = "0.8.1" @@ -2012,7 +2201,7 @@ dependencies = [ "prost", "ripemd160", "sha2 0.9.9", - "sha3", + "sha3 0.9.1", "sp-std", ] @@ -2032,6 +2221,44 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-rlp" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" +dependencies = [ + "rlp", +] + +[[package]] +name = "impl-serde" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "incrementalmerkletree" version = "0.2.0" @@ -2083,6 +2310,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if 1.0.0", + "js-sys", + "wasm-bindgen", + "web-sys", ] [[package]] @@ -2115,7 +2345,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7baec19d4e83f9145d4891178101a604565edff9645770fc979804138b04c" dependencies = [ - "bitvec", + "bitvec 0.22.3", "bls12_381", "ff", "group", @@ -2285,7 +2515,7 @@ source = "git+https://github.com/anoma/masp?rev=bee40fc465f6afbd10558d12fe96eb17 dependencies = [ "aes", "bip0039", - "bitvec", + "bitvec 0.22.3", "blake2b_simd 1.0.0", "blake2s_simd 1.0.0", "bls12_381", @@ -2345,6 +2575,16 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +[[package]] +name = "measure_time" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56220900f1a0923789ecd6bf25fbae8af3b2f1ff3e9e297fc9b6b8674dd4d852" +dependencies = [ + "instant", + "log", +] + [[package]] name = "memchr" version = "2.5.0" @@ -2393,6 +2633,18 @@ dependencies = [ "nonempty", ] +[[package]] +name = "merlin" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.5.1", + "zeroize", +] + [[package]] name = "mime" version = "0.3.16" @@ -2420,6 +2672,12 @@ dependencies = [ "windows-sys 0.42.0", ] +[[package]] +name = "miracl_core" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94c7128ba23c81f6471141b90f17654f89ef44a56e14b8a4dd0fddfccd655277" + [[package]] name = "moka" version = "0.8.6" @@ -2456,7 +2714,7 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "namada" -version = "0.12.0" +version = "0.12.1" dependencies = [ "async-trait", "bellman", @@ -2466,6 +2724,8 @@ dependencies = [ "clru", "data-encoding", "derivative", + "eyre", + "ferveo-common", "ibc", "ibc-proto", "itertools", @@ -2473,14 +2733,18 @@ dependencies = [ "masp_primitives", "masp_proofs", "namada_core", + "namada_ethereum_bridge", "namada_proof_of_stake", "parity-wasm", "paste", "proptest", "prost", "pwasm-utils", + "rand 0.8.5", + "rand_core 0.6.4", "rayon", "rust_decimal", + "serde", "serde_json", "sha2 0.9.9", "tempfile", @@ -2500,9 +2764,10 @@ dependencies = [ [[package]] name = "namada_core" -version = "0.12.0" +version = "0.12.1" dependencies = [ "ark-bls12-381", + "ark-ec", "ark-serialize", "bech32", "bellman", @@ -2511,7 +2776,11 @@ dependencies = [ "data-encoding", "derivative", "ed25519-consensus", + "ethabi", + "eyre", + "ferveo", "ferveo-common", + "group-threshold-cryptography", "ibc", "ibc-proto", "ics23", @@ -2519,6 +2788,7 @@ dependencies = [ "itertools", "libsecp256k1", "masp_primitives", + "num-rational", "proptest", "prost", "prost-types", @@ -2534,14 +2804,32 @@ dependencies = [ "tendermint", "tendermint-proto", "thiserror", + "tiny-keccak", "tonic-build", "tracing", "zeroize", ] +[[package]] +name = "namada_ethereum_bridge" +version = "0.11.0" +dependencies = [ + "borsh", + "eyre", + "itertools", + "namada_core", + "namada_proof_of_stake", + "serde", + "serde_json", + "tendermint", + "tendermint-proto", + "tendermint-rpc", + "tracing", +] + [[package]] name = "namada_macros" -version = "0.12.0" +version = "0.12.1" dependencies = [ "quote", "syn", @@ -2549,21 +2837,23 
@@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "derivative", + "ferveo-common", "namada_core", "proptest", "rust_decimal", "rust_decimal_macros", + "tendermint-proto", "thiserror", "tracing", ] [[package]] name = "namada_tests" -version = "0.12.0" +version = "0.12.1" dependencies = [ "chrono", "concat-idents", @@ -2592,7 +2882,7 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "masp_primitives", @@ -2607,7 +2897,7 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "hex", @@ -2618,7 +2908,7 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "namada_core", @@ -2631,7 +2921,7 @@ dependencies = [ [[package]] name = "namada_wasm_for_tests" -version = "0.12.0" +version = "0.12.1" dependencies = [ "borsh", "getrandom 0.2.8", @@ -2656,6 +2946,20 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" +[[package]] +name = "num" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + [[package]] name = "num-bigint" version = "0.4.3" @@ -2668,6 +2972,15 @@ dependencies = [ "serde", ] +[[package]] +name = "num-complex" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19" +dependencies = [ + "num-traits", +] + [[package]] name = "num-derive" version = "0.3.3" @@ -2689,6 +3002,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-rational" version = "0.4.1" @@ -2769,7 +3093,7 @@ dependencies = [ "aes", "arrayvec 0.7.2", "bigint", - "bitvec", + "bitvec 0.22.3", "blake2b_simd 1.0.0", "ff", "fpe", @@ -2796,6 +3120,32 @@ dependencies = [ "group", ] +[[package]] +name = "parity-scale-codec" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "366e44391a8af4cfd6002ef6ba072bae071a96aafca98d7d448a34c5dca38b6a" +dependencies = [ + "arrayvec 0.7.2", + "bitvec 1.0.1", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" +dependencies = [ + "proc-macro-crate 1.2.1", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "parity-wasm" version = "0.45.0" @@ -2989,6 +3339,19 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "primitive-types" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-rlp", + "impl-serde", + "uint", +] + [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -2998,6 +3361,17 @@ dependencies = [ "toml", ] +[[package]] +name = "proc-macro-crate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" +dependencies = [ + "once_cell", + "thiserror", + "toml", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -3187,6 +3561,12 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + [[package]] name = "rand" version = "0.7.3" @@ -3473,6 +3853,16 @@ dependencies = [ "syn", ] +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rustc-hex", +] + [[package]] name = "rust_decimal" version = "1.26.1" @@ -3507,6 +3897,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + [[package]] name = "rustc_version" version = "0.3.3" @@ -3871,6 +4267,16 @@ dependencies = [ "opaque-debug", ] +[[package]] +name = "sha3" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" +dependencies = [ + "digest 0.10.5", + "keccak", +] + [[package]] name = "sharded-slab" version = "0.1.4" @@ -3990,6 +4396,19 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "subproductdomain" +version = "0.1.0" +source = "git+https://github.com/anoma/ferveo?rev=9e5e91c954158e7cff45c483fd06cd649a81553f#9e5e91c954158e7cff45c483fd06cd649a81553f" +dependencies = [ + "anyhow", + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", +] + [[package]] name = "subtle" version = "2.4.1" @@ -5277,6 +5696,15 @@ dependencies = [ "tap", ] +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + [[package]] name = "zcash_encoding" version = "0.0.0" @@ -5316,7 +5744,7 @@ source = "git+https://github.com/zcash/librustzcash/?rev=2425a08#2425a0869098e3b dependencies = [ "aes", "bip0039", - "bitvec", + "bitvec 0.22.3", "blake2b_simd 1.0.0", "blake2s_simd 1.0.0", "bls12_381", diff --git a/wasm_for_tests/wasm_source/Cargo.toml b/wasm_for_tests/wasm_source/Cargo.toml index dd17f4c0dc..96a000968c 100644 --- a/wasm_for_tests/wasm_source/Cargo.toml +++ b/wasm_for_tests/wasm_source/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_wasm_for_tests" resolver = "2" -version = 
"0.12.0" +version = "0.12.1" [lib] crate-type = ["cdylib"]