diff --git a/.changelog/v0.31.7/bug-fixes/2634-fix-ibc-shielded-transfer-back.md b/.changelog/v0.31.7/bug-fixes/2634-fix-ibc-shielded-transfer-back.md new file mode 100644 index 0000000000..931c81fe5d --- /dev/null +++ b/.changelog/v0.31.7/bug-fixes/2634-fix-ibc-shielded-transfer-back.md @@ -0,0 +1,2 @@ +- Fix ibc-gen-shielded for shielded transfers back to the origin + ([\#2634](https://github.com/anoma/namada/issues/2634)) \ No newline at end of file diff --git a/.changelog/v0.31.7/bug-fixes/2701-fix-default-node.md b/.changelog/v0.31.7/bug-fixes/2701-fix-default-node.md new file mode 100644 index 0000000000..e3f96674d9 --- /dev/null +++ b/.changelog/v0.31.7/bug-fixes/2701-fix-default-node.md @@ -0,0 +1,2 @@ +- Fixed the default `--node` argument when no specified. + ([\#2701](https://github.com/anoma/namada/pull/2701)) \ No newline at end of file diff --git a/.changelog/v0.31.7/bug-fixes/2739-router-no-pat-match.md b/.changelog/v0.31.7/bug-fixes/2739-router-no-pat-match.md new file mode 100644 index 0000000000..00ebc693c9 --- /dev/null +++ b/.changelog/v0.31.7/bug-fixes/2739-router-no-pat-match.md @@ -0,0 +1,2 @@ +- Bail from router if a nester router segment is not matched. + ([\#2739](https://github.com/anoma/namada/pull/2739)) \ No newline at end of file diff --git a/.changelog/v0.31.7/improvements/2482-refactor-finalize-block.md b/.changelog/v0.31.7/improvements/2482-refactor-finalize-block.md new file mode 100644 index 0000000000..79150d5a8e --- /dev/null +++ b/.changelog/v0.31.7/improvements/2482-refactor-finalize-block.md @@ -0,0 +1,2 @@ +- Refactored sub-systems integration in the ABCI FinalizeBlock request handler. 
+ ([\#2482](https://github.com/anoma/namada/pull/2482)) \ No newline at end of file diff --git a/.changelog/v0.31.7/improvements/2493-refactor-token.md b/.changelog/v0.31.7/improvements/2493-refactor-token.md new file mode 100644 index 0000000000..3837e5cdc5 --- /dev/null +++ b/.changelog/v0.31.7/improvements/2493-refactor-token.md @@ -0,0 +1 @@ +- Refactored token crates. ([\#2493](https://github.com/anoma/namada/pull/2493)) \ No newline at end of file diff --git a/.changelog/v0.31.7/improvements/2503-refactor-core.md b/.changelog/v0.31.7/improvements/2503-refactor-core.md new file mode 100644 index 0000000000..ce17f4bec7 --- /dev/null +++ b/.changelog/v0.31.7/improvements/2503-refactor-core.md @@ -0,0 +1,2 @@ +- Refactored core crate to flatten the modules structure. + ([\#2503](https://github.com/anoma/namada/pull/2503)) \ No newline at end of file diff --git a/.changelog/v0.31.7/improvements/2506-refactor-gov.md b/.changelog/v0.31.7/improvements/2506-refactor-gov.md new file mode 100644 index 0000000000..3b4b0d144f --- /dev/null +++ b/.changelog/v0.31.7/improvements/2506-refactor-gov.md @@ -0,0 +1,2 @@ +- Refactored governance crate dependencies. + ([\#2506](https://github.com/anoma/namada/pull/2506)) \ No newline at end of file diff --git a/.changelog/v0.31.7/improvements/2507-refactor-testing-addrs.md b/.changelog/v0.31.7/improvements/2507-refactor-testing-addrs.md new file mode 100644 index 0000000000..a52905d84f --- /dev/null +++ b/.changelog/v0.31.7/improvements/2507-refactor-testing-addrs.md @@ -0,0 +1,2 @@ +- Hid addresses used for testing from public API. 
+ ([\#2507](https://github.com/anoma/namada/pull/2507)) \ No newline at end of file diff --git a/.changelog/v0.31.7/improvements/2588-test-vectors-0.30.2-rev-1.md b/.changelog/v0.31.7/improvements/2588-test-vectors-0.30.2-rev-1.md new file mode 100644 index 0000000000..2a3e50a4b8 --- /dev/null +++ b/.changelog/v0.31.7/improvements/2588-test-vectors-0.30.2-rev-1.md @@ -0,0 +1,3 @@ +- Expanded the variety of test vectors generated for hardware + wallets and simplified their format in some places. + ([\#2588](https://github.com/anoma/namada/pull/2588)) \ No newline at end of file diff --git a/.changelog/v0.31.7/improvements/2606-refactor-state.md b/.changelog/v0.31.7/improvements/2606-refactor-state.md new file mode 100644 index 0000000000..b860a1a657 --- /dev/null +++ b/.changelog/v0.31.7/improvements/2606-refactor-state.md @@ -0,0 +1,2 @@ +- Refactored the state crate. + ([\#2606](https://github.com/anoma/namada/pull/2606)) \ No newline at end of file diff --git a/.changelog/v0.31.7/improvements/2670-client-slashed-bonds.md b/.changelog/v0.31.7/improvements/2670-client-slashed-bonds.md new file mode 100644 index 0000000000..9159108854 --- /dev/null +++ b/.changelog/v0.31.7/improvements/2670-client-slashed-bonds.md @@ -0,0 +1,2 @@ +- Add slashed bonds/unbonds info to the client. 
+ ([\#2670](https://github.com/anoma/namada/pull/2670)) \ No newline at end of file diff --git a/.changelog/v0.31.7/improvements/2687-toolchain-update.md b/.changelog/v0.31.7/improvements/2687-toolchain-update.md new file mode 100644 index 0000000000..1e63e434e7 --- /dev/null +++ b/.changelog/v0.31.7/improvements/2687-toolchain-update.md @@ -0,0 +1 @@ +- Moving to rust version 1.76.0 ([#2687](https://github.com/anoma/anoma/pull/2687)) \ No newline at end of file diff --git a/.changelog/v0.31.7/summary.md b/.changelog/v0.31.7/summary.md new file mode 100644 index 0000000000..c2f31afc43 --- /dev/null +++ b/.changelog/v0.31.7/summary.md @@ -0,0 +1,2 @@ +Namada 0.31.7 is a patch release that contains code refactors, various fixes and improvements. + diff --git a/.changelog/v0.31.7/testing/2695-mock-masp-prover.md b/.changelog/v0.31.7/testing/2695-mock-masp-prover.md new file mode 100644 index 0000000000..dcde170664 --- /dev/null +++ b/.changelog/v0.31.7/testing/2695-mock-masp-prover.md @@ -0,0 +1,2 @@ +- Implemented mock transaction prover and verifier for faster testing and lower + development time. 
([\#2695](https://github.com/anoma/namada/pull/2695)) \ No newline at end of file diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 70d1ecddb4..79b4229fc0 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -101,7 +101,7 @@ jobs: matrix: os: [ubuntu-latest] wasm_cache_version: ["v2"] - nightly_version: [nightly-2023-06-01] + nightly_version: [nightly-2024-02-20] mold_version: [2.4.0] steps: @@ -164,7 +164,7 @@ jobs: matrix: os: [ubuntu-latest] wasm_cache_version: ["v2"] - nightly_version: [nightly-2023-06-01] + nightly_version: [nightly-2024-02-20] mold_version: [2.4.0] steps: @@ -194,7 +194,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - nightly_version: [nightly-2023-06-01] + nightly_version: [nightly-2024-02-20] mold_version: [2.4.0] make: - name: ABCI @@ -291,7 +291,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - nightly_version: [nightly-2023-06-01] + nightly_version: [nightly-2024-02-20] mold_version: [2.4.0] make: - name: ABCI @@ -380,7 +380,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - nightly_version: [nightly-2023-06-01] + nightly_version: [nightly-2024-02-20] mold_version: [2.4.0] make: - name: ABCI @@ -568,7 +568,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - nightly_version: [nightly-2023-06-01] + nightly_version: [nightly-2024-02-20] mold_version: [2.4.0] comet_bft: [0.37.2] hermes: [1.7.4-namada-beta7] diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 7c5f589e08..889f60ccc3 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -36,7 +36,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - nightly_version: [nightly-2023-06-01] + nightly_version: [nightly-2024-02-20] make: - name: Clippy command: clippy diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml index 96fa86a8ed..a64cddee73 100644 --- a/.github/workflows/cron.yml +++ 
b/.github/workflows/cron.yml @@ -22,7 +22,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - nightly_version: [nightly-2023-06-01] + nightly_version: [nightly-2024-02-20] make: - name: Audit command: audit diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 9cf28fecb3..dc013b2399 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -36,7 +36,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - nightly_version: [nightly-2023-06-01] + nightly_version: [nightly-2024-02-20] mdbook_version: [rust-lang/mdbook@v0.4.18] mdbook_mermaid: [badboy/mdbook-mermaid@v0.11.1] mdbook_linkcheck: [Michael-F-Bryan/mdbook-linkcheck@v0.7.6] diff --git a/.github/workflows/triggerable_sync.yml b/.github/workflows/triggerable_sync.yml index bf1f01d6f1..c6c4716cde 100644 --- a/.github/workflows/triggerable_sync.yml +++ b/.github/workflows/triggerable_sync.yml @@ -15,8 +15,8 @@ on: description: 'Chain ID' required: true type: string - add_peer: - description: "Optional address to add to Comet's P2P config (must be a valid `TendermintAddress`, e.g. `tcp://48d3626f425e44192f4ecf4f9f777b4d17c66ba6@54.195.145.177:26656`)." + seed_nodes: + description: "An optional list of seed node addresses (comma separated with no whitespace) to add to Comet's P2P config (must be a valid `TendermintAddress`es, e.g. `tcp://48d3626f425e44192f4ecf4f9f777b4d17c66ba6@54.195.145.177:26656`)." 
required: false default: '' type: string @@ -41,7 +41,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - nightly_version: [nightly-2023-06-01] + nightly_version: [nightly-2024-02-20] mold_version: [2.4.0] comet_bft: [0.37.2] name: ["Run chain sync test"] @@ -109,7 +109,7 @@ jobs: NAMADA_E2E_USE_PREBUILT_BINARIES: "true" NAMADA_E2E_KEEP_TEMP: "true" NAMADA_LOG_COLOR: "false" - NAMADA_ADD_PEER: "${{ inputs.add_peer }}" + NAMADA_SEED_NODES: "${{ inputs.seed_nodes }}" NAMADA_CHAIN_ID: "${{ inputs.chain_id }}" RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold" - name: Upload logs diff --git a/CHANGELOG.md b/CHANGELOG.md index e429bfa224..75b5a5e98d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,43 @@ # CHANGELOG +## v0.31.7 + +Namada 0.31.7 is a patch release that contains code refactors, various fixes and improvements. + +### BUG FIXES + +- Fix ibc-gen-shielded for shielded transfers back to the origin + ([\#2634](https://github.com/anoma/namada/issues/2634)) +- Fixed the default `--node` argument when no specified. + ([\#2701](https://github.com/anoma/namada/pull/2701)) +- Bail from router if a nester router segment is not matched. + ([\#2739](https://github.com/anoma/namada/pull/2739)) + +### IMPROVEMENTS + +- Refactored sub-systems integration in the ABCI FinalizeBlock request handler. + ([\#2482](https://github.com/anoma/namada/pull/2482)) +- Refactored token crates. ([\#2493](https://github.com/anoma/namada/pull/2493)) +- Refactored core crate to flatten the modules structure. + ([\#2503](https://github.com/anoma/namada/pull/2503)) +- Refactored governance crate dependencies. + ([\#2506](https://github.com/anoma/namada/pull/2506)) +- Hid addresses used for testing from public API. + ([\#2507](https://github.com/anoma/namada/pull/2507)) +- Expanded the variety of test vectors generated for hardware + wallets and simplified their format in some places. 
+ ([\#2588](https://github.com/anoma/namada/pull/2588)) +- Refactored the state crate. + ([\#2606](https://github.com/anoma/namada/pull/2606)) +- Add slashed bonds/unbonds info to the client. + ([\#2670](https://github.com/anoma/namada/pull/2670)) +- Moving to rust version 1.76.0 ([#2687](https://github.com/anoma/anoma/pull/2687)) + +### TESTING + +- Implemented mock transaction prover and verifier for faster testing and lower + development time. ([\#2695](https://github.com/anoma/namada/pull/2695)) + ## v0.31.6 Namada 0.31.6 is a patch release that contains various fixes and improvements. diff --git a/Cargo.lock b/Cargo.lock index 0ab8f6a289..16e348aa35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -50,9 +50,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ "getrandom 0.2.11", "once_cell", @@ -1427,9 +1427,9 @@ checksum = "b365fabc795046672053e29c954733ec3b05e4be654ab130fe8f1f94d7051f35" [[package]] name = "curve25519-dalek" -version = "4.1.1" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -3897,7 +3897,7 @@ dependencies = [ [[package]] name = "masp_note_encryption" version = "1.0.0" -source = "git+https://github.com/anoma/masp?tag=v1.1.0#f24691c0eb76909e3c15ae03aef294dccebd2df3" +source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" dependencies = [ "borsh", "chacha20", @@ -3910,7 +3910,7 @@ dependencies = [ [[package]] name = "masp_primitives" version = "1.0.0" -source = 
"git+https://github.com/anoma/masp?tag=v1.1.0#f24691c0eb76909e3c15ae03aef294dccebd2df3" +source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" dependencies = [ "aes", "bip0039", @@ -3942,7 +3942,7 @@ dependencies = [ [[package]] name = "masp_proofs" version = "1.0.0" -source = "git+https://github.com/anoma/masp?tag=v1.1.0#f24691c0eb76909e3c15ae03aef294dccebd2df3" +source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" dependencies = [ "bellman", "blake2b_simd", @@ -4122,7 +4122,7 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "namada" -version = "0.31.6" +version = "0.31.7" dependencies = [ "assert_matches", "async-trait", @@ -4154,6 +4154,7 @@ dependencies = [ "namada_ibc", "namada_parameters", "namada_proof_of_stake", + "namada_replay_protection", "namada_sdk", "namada_state", "namada_test_utils", @@ -4205,7 +4206,7 @@ dependencies = [ [[package]] name = "namada_account" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_core", @@ -4217,7 +4218,7 @@ dependencies = [ [[package]] name = "namada_apps" -version = "0.31.6" +version = "0.31.7" dependencies = [ "ark-serialize", "ark-std", @@ -4308,7 +4309,7 @@ dependencies = [ [[package]] name = "namada_benchmarks" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "borsh-ext", @@ -4324,7 +4325,7 @@ dependencies = [ [[package]] name = "namada_core" -version = "0.31.6" +version = "0.31.7" dependencies = [ "bech32 0.8.1", "borsh", @@ -4342,7 +4343,6 @@ dependencies = [ "k256", "masp_primitives", "namada_macros", - "num-derive", "num-integer", "num-rational 0.4.1", "num-traits 0.2.17", @@ -4351,7 +4351,6 @@ dependencies = [ "pretty_assertions", "primitive-types", "proptest", - "prost 0.12.3", "prost-types 0.12.3", "rand 0.8.5", "rand_core 0.6.4", @@ -4374,7 +4373,7 @@ dependencies = 
[ [[package]] name = "namada_encoding_spec" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "itertools 0.10.5", @@ -4385,7 +4384,7 @@ dependencies = [ [[package]] name = "namada_ethereum_bridge" -version = "0.31.6" +version = "0.31.7" dependencies = [ "assert_matches", "borsh", @@ -4417,8 +4416,9 @@ dependencies = [ [[package]] name = "namada_examples" -version = "0.31.6" +version = "0.31.7" dependencies = [ + "data-encoding", "masp_proofs", "namada_sdk", "proptest", @@ -4428,7 +4428,7 @@ dependencies = [ [[package]] name = "namada_gas" -version = "0.31.6" +version = "0.31.7" dependencies = [ "assert_matches", "borsh", @@ -4440,14 +4440,14 @@ dependencies = [ [[package]] name = "namada_governance" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "itertools 0.10.5", "namada_core", "namada_macros", "namada_parameters", - "namada_state", + "namada_storage", "namada_trans_token", "proptest", "serde 1.0.193", @@ -4458,7 +4458,7 @@ dependencies = [ [[package]] name = "namada_ibc" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "ibc", @@ -4467,11 +4467,12 @@ dependencies = [ "ics23", "masp_primitives", "namada_core", + "namada_gas", "namada_governance", "namada_parameters", "namada_state", "namada_storage", - "namada_trans_token", + "namada_token", "primitive-types", "proptest", "prost 0.12.3", @@ -4482,7 +4483,7 @@ dependencies = [ [[package]] name = "namada_light_sdk" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "borsh-ext", @@ -4496,7 +4497,7 @@ dependencies = [ [[package]] name = "namada_macros" -version = "0.31.6" +version = "0.31.7" dependencies = [ "pretty_assertions", "proc-macro2", @@ -4506,7 +4507,7 @@ dependencies = [ [[package]] name = "namada_merkle_tree" -version = "0.31.6" +version = "0.31.7" dependencies = [ "assert_matches", "borsh", @@ -4522,7 +4523,7 @@ dependencies = [ [[package]] name = "namada_parameters" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", 
"namada_core", @@ -4533,7 +4534,7 @@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.31.6" +version = "0.31.7" dependencies = [ "assert_matches", "borsh", @@ -4560,9 +4561,16 @@ dependencies = [ "yansi", ] +[[package]] +name = "namada_replay_protection" +version = "0.31.7" +dependencies = [ + "namada_core", +] + [[package]] name = "namada_sdk" -version = "0.31.6" +version = "0.31.7" dependencies = [ "assert_matches", "async-trait", @@ -4626,23 +4634,24 @@ dependencies = [ [[package]] name = "namada_shielded_token" -version = "0.31.6" +version = "0.31.7" dependencies = [ + "borsh", "masp_primitives", "namada_core", "namada_parameters", - "namada_state", "namada_storage", "namada_trans_token", "proptest", "rayon", + "serde 1.0.193", "test-log", "tracing", ] [[package]] name = "namada_state" -version = "0.31.6" +version = "0.31.7" dependencies = [ "assert_matches", "borsh", @@ -4653,6 +4662,7 @@ dependencies = [ "namada_gas", "namada_merkle_tree", "namada_parameters", + "namada_replay_protection", "namada_storage", "namada_trans_token", "namada_tx", @@ -4668,13 +4678,14 @@ dependencies = [ [[package]] name = "namada_storage" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "itertools 0.10.5", "namada_core", "namada_gas", "namada_merkle_tree", + "namada_replay_protection", "namada_tx", "thiserror", "tracing", @@ -4682,7 +4693,7 @@ dependencies = [ [[package]] name = "namada_test_utils" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_core", @@ -4691,7 +4702,7 @@ dependencies = [ [[package]] name = "namada_tests" -version = "0.31.6" +version = "0.31.7" dependencies = [ "assert_cmd", "async-trait", @@ -4743,7 +4754,7 @@ dependencies = [ [[package]] name = "namada_token" -version = "0.31.6" +version = "0.31.7" dependencies = [ "namada_core", "namada_shielded_token", @@ -4753,7 +4764,7 @@ dependencies = [ [[package]] name = "namada_trans_token" -version = "0.31.6" +version = "0.31.7" dependencies = [ 
"namada_core", "namada_storage", @@ -4761,7 +4772,7 @@ dependencies = [ [[package]] name = "namada_tx" -version = "0.31.6" +version = "0.31.7" dependencies = [ "ark-bls12-381", "assert_matches", @@ -4785,7 +4796,7 @@ dependencies = [ [[package]] name = "namada_tx_env" -version = "0.31.6" +version = "0.31.7" dependencies = [ "namada_core", "namada_storage", @@ -4793,7 +4804,7 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "masp_primitives", @@ -4815,7 +4826,7 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "masp_primitives", @@ -4824,7 +4835,7 @@ dependencies = [ [[package]] name = "namada_vote_ext" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "data-encoding", @@ -4835,7 +4846,7 @@ dependencies = [ [[package]] name = "namada_vp_env" -version = "0.31.6" +version = "0.31.7" dependencies = [ "derivative", "masp_primitives", @@ -4847,7 +4858,7 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_account", diff --git a/Cargo.toml b/Cargo.toml index 3eb3f9d54f..be4e58274d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,6 +17,7 @@ members = [ "crates/merkle_tree", "crates/parameters", "crates/proof_of_stake", + "crates/replay_protection", "crates/sdk", "crates/namada", "crates/shielded_token", @@ -52,7 +53,7 @@ keywords = ["blockchain", "privacy", "crypto", "protocol", "network"] license = "GPL-3.0" readme = "README.md" repository = "https://github.com/anoma/namada" -version = "0.31.6" +version = "0.31.7" [workspace.dependencies] ark-bls12-381 = {version = "0.3"} @@ -114,8 +115,8 @@ ledger-transport-hid = "0.10.0" libc = "0.2.97" libloading = "0.7.2" # branch = "murisi/namada-integration" -masp_primitives = { git = "https://github.com/anoma/masp", tag = "v1.1.0" } -masp_proofs = { git = "https://github.com/anoma/masp", tag = 
"v1.1.0", default-features = false, features = ["local-prover"] } +masp_primitives = { git = "https://github.com/anoma/masp", rev = "30492323d98b0531fd18b6285cd94afcaa4066d2" } +masp_proofs = { git = "https://github.com/anoma/masp", rev = "30492323d98b0531fd18b6285cd94afcaa4066d2", default-features = false, features = ["local-prover"] } num256 = "0.3.5" num_cpus = "1.13.0" num-derive = "0.3.3" diff --git a/Makefile b/Makefile index 1efe02525d..8c3b8a8309 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,6 @@ package = namada NAMADA_E2E_USE_PREBUILT_BINARIES ?= true NAMADA_E2E_DEBUG ?= true RUST_BACKTRACE ?= 1 -NAMADA_MASP_TEST_SEED ?= 0 PROPTEST_CASES ?= 100 # Disable shrinking in `make test-pos-sm` for CI runs. If the test fail in CI, # we only want to get the seed. @@ -48,6 +47,7 @@ crates += namada_macros crates += namada_merkle_tree crates += namada_parameters crates += namada_proof_of_stake +crates += namada_replay_protection crates += namada_sdk crates += namada_shielded_token crates += namada_state @@ -148,14 +148,12 @@ test: test-unit test-e2e test-wasm test-benches test-coverage: # Run integration tests separately because they require `integration` - # feature (and without coverage) and run them with pre-built MASP proofs + # feature (and without coverage) $(cargo) +$(nightly) llvm-cov --output-path lcov.info \ --features namada/testing \ --lcov \ -- --skip e2e --skip pos_state_machine_test --skip integration \ -Z unstable-options --report-time && \ - NAMADA_MASP_TEST_SEED=$(NAMADA_MASP_TEST_SEED) \ - NAMADA_MASP_TEST_PROOFS=load \ $(cargo) +$(nightly) test integration:: \ --features integration \ -- -Z unstable-options --report-time @@ -175,23 +173,8 @@ test-e2e: --nocapture \ -Z unstable-options --report-time -# Run integration tests with pre-built MASP proofs +# Run integration tests test-integration: - NAMADA_MASP_TEST_SEED=$(NAMADA_MASP_TEST_SEED) \ - NAMADA_MASP_TEST_PROOFS=load \ - make test-integration-slow - -# Clear pre-built proofs, run 
integration tests and save the new proofs -test-integration-save-proofs: - # Clear old proofs first - rm -f test_fixtures/masp_proofs/*.bin || true - NAMADA_MASP_TEST_SEED=$(NAMADA_MASP_TEST_SEED) \ - NAMADA_MASP_TEST_PROOFS=save \ - TEST_FILTER=masp \ - make test-integration-slow - -# Run integration tests without specifying any pre-built MASP proofs option -test-integration-slow: RUST_BACKTRACE=$(RUST_BACKTRACE) \ $(cargo) +$(nightly) test $(jobs) integration::$(TEST_FILTER) --features integration \ -Z unstable-options \ diff --git a/crates/account/src/lib.rs b/crates/account/src/lib.rs index e7f79a3e55..890cf942a0 100644 --- a/crates/account/src/lib.rs +++ b/crates/account/src/lib.rs @@ -7,9 +7,9 @@ mod storage_key; mod types; use borsh::{BorshDeserialize, BorshSerialize}; -pub use namada_core::types::account::AccountPublicKeysMap; -use namada_core::types::address::Address; -use namada_core::types::key::common; +pub use namada_core::account::AccountPublicKeysMap; +use namada_core::address::Address; +use namada_core::key::common; use serde::{Deserialize, Serialize}; pub use storage::*; pub use storage_key::*; diff --git a/crates/account/src/storage.rs b/crates/account/src/storage.rs index 50b56e7808..f9f328919c 100644 --- a/crates/account/src/storage.rs +++ b/crates/account/src/storage.rs @@ -1,8 +1,6 @@ //! 
Cryptographic signature keys storage API -use namada_core::types::address::Address; -use namada_core::types::key::common; -use namada_core::types::storage; +use namada_core::storage; use namada_storage::{Result, StorageRead, StorageWrite}; use super::*; diff --git a/crates/account/src/storage_key.rs b/crates/account/src/storage_key.rs index 20e02906b8..baec1fba86 100644 --- a/crates/account/src/storage_key.rs +++ b/crates/account/src/storage_key.rs @@ -1,6 +1,6 @@ -use namada_core::types::address::Address; -use namada_core::types::key::common; -use namada_core::types::storage::{self, DbKeySeg}; +use namada_core::address::Address; +use namada_core::key::common; +use namada_core::storage::{self, DbKeySeg}; use namada_macros::StorageKeys; use namada_storage::collections::lazy_map::LazyMap; use namada_storage::collections::{lazy_map, LazyCollection}; diff --git a/crates/account/src/types.rs b/crates/account/src/types.rs index 43ada0da08..1fb7bc7474 100644 --- a/crates/account/src/types.rs +++ b/crates/account/src/types.rs @@ -1,6 +1,6 @@ +use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use namada_core::types::address::Address; -use namada_core::types::key::common; +use namada_core::key::common; use serde::{Deserialize, Serialize}; /// A tx data type to initialize a new established account @@ -48,8 +48,8 @@ pub struct UpdateAccount { #[cfg(any(test, feature = "testing"))] /// Tests and strategies for accounts pub mod tests { - use namada_core::types::address::testing::arb_non_internal_address; - use namada_core::types::key::testing::arb_common_pk; + use namada_core::address::testing::arb_non_internal_address; + use namada_core::key::testing::arb_common_pk; use proptest::prelude::Just; use proptest::{collection, option, prop_compose}; diff --git a/crates/apps/src/bin/namada-node/cli.rs b/crates/apps/src/bin/namada-node/cli.rs index 7477d3d6af..a0d00d48dc 100644 --- a/crates/apps/src/bin/namada-node/cli.rs +++ 
b/crates/apps/src/bin/namada-node/cli.rs @@ -1,7 +1,7 @@ //! Namada node CLI. use eyre::{Context, Result}; -use namada::types::time::{DateTimeUtc, Utc}; +use namada::core::time::{DateTimeUtc, Utc}; use namada_apps::cli::{self, cmds}; use namada_apps::config::ValidatorLocalConfig; use namada_apps::node::ledger; diff --git a/crates/apps/src/lib/bench_utils.rs b/crates/apps/src/lib/bench_utils.rs index 9cff3d0368..42f57f9a74 100644 --- a/crates/apps/src/lib/bench_utils.rs +++ b/crates/apps/src/lib/bench_utils.rs @@ -1,6 +1,7 @@ //! Library code for benchmarks provides a wrapper of the ledger's shell //! `BenchShell` and helper functions to generate transactions. +use std::cell::RefCell; use std::collections::BTreeSet; use std::fs::{File, OpenOptions}; use std::io::{Read, Write}; @@ -14,6 +15,16 @@ use borsh_ext::BorshSerializeExt; use masp_primitives::transaction::Transaction; use masp_primitives::zip32::ExtendedFullViewingKey; use masp_proofs::prover::LocalTxProver; +use namada::core::address::{self, Address, InternalAddress}; +use namada::core::chain::ChainId; +use namada::core::hash::Hash; +use namada::core::key::common::SecretKey; +use namada::core::masp::{ + ExtendedViewingKey, PaymentAddress, TransferSource, TransferTarget, +}; +use namada::core::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; +use namada::core::time::DateTimeUtc; +use namada::core::token::{Amount, DenominatedAmount, Transfer}; use namada::governance::storage::proposal::ProposalType; use namada::governance::InitProposalData; use namada::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; @@ -50,6 +61,7 @@ use namada::ibc::core::host::types::path::{ use namada::ibc::primitives::proto::{Any, Protobuf}; use namada::ibc::primitives::{Msg, Timestamp as IbcTimestamp}; use namada::ibc::storage::port_key; +use namada::io::StdIo; use namada::ledger::dry_run_tx; use namada::ledger::gas::TxGasMeter; use namada::ledger::ibc::storage::{channel_key, connection_key}; @@ -58,21 +70,9 @@ use 
namada::ledger::queries::{ Client, EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, }; use namada::state::StorageRead; -use namada::tendermint_rpc::{self}; use namada::tx::data::pos::Bond; use namada::tx::data::{TxResult, VpsResult}; use namada::tx::{Code, Data, Section, Signature, Tx}; -use namada::types::address::{self, Address, InternalAddress}; -use namada::types::chain::ChainId; -use namada::types::hash::Hash; -use namada::types::io::StdIo; -use namada::types::key::common::SecretKey; -use namada::types::masp::{ - ExtendedViewingKey, PaymentAddress, TransferSource, TransferTarget, -}; -use namada::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; -use namada::types::time::DateTimeUtc; -use namada::types::token::{Amount, DenominatedAmount, Transfer}; use namada::vm::wasm::run; use namada::{proof_of_stake, tendermint}; use namada_sdk::masp::{ @@ -226,7 +226,7 @@ impl Default for BenchShell { source: Some(defaults::albert_address()), }; let params = - proof_of_stake::storage::read_pos_params(&bench_shell.wl_storage) + proof_of_stake::storage::read_pos_params(&bench_shell.state) .unwrap(); let signed_tx = bench_shell.generate_tx( TX_BOND_WASM, @@ -237,7 +237,7 @@ impl Default for BenchShell { ); bench_shell.execute_tx(&signed_tx); - bench_shell.wl_storage.commit_tx(); + bench_shell.state.commit_tx(); // Initialize governance proposal let content_section = Section::ExtraData(Code::new( @@ -263,7 +263,7 @@ impl Default for BenchShell { ); bench_shell.execute_tx(&signed_tx); - bench_shell.wl_storage.commit_tx(); + bench_shell.state.commit_tx(); bench_shell.commit_block(); // Advance epoch for pos benches @@ -272,7 +272,7 @@ impl Default for BenchShell { } // Must start after current epoch debug_assert_eq!( - bench_shell.wl_storage.get_block_epoch().unwrap().next(), + bench_shell.state.get_block_epoch().unwrap().next(), voting_start_epoch ); @@ -352,7 +352,7 @@ impl BenchShell { pub fn generate_ibc_transfer_tx(&self) -> Tx { let token = 
PrefixedCoin { - denom: address::nam().to_string().parse().unwrap(), + denom: address::testing::nam().to_string().parse().unwrap(), amount: Amount::native_whole(1000) .to_string_native() .split('.') @@ -388,10 +388,11 @@ impl BenchShell { } pub fn execute_tx(&mut self, tx: &Tx) { + let gas_meter = + RefCell::new(TxGasMeter::new_from_sub_limit(u64::MAX.into())); run::tx( - &self.inner.wl_storage.storage, - &mut self.inner.wl_storage.write_log, - &mut TxGasMeter::new_from_sub_limit(u64::MAX.into()), + &mut self.inner.state, + &gas_meter, &TxIndex(0), tx, &mut self.inner.vp_wasm_cache, @@ -402,26 +403,29 @@ impl BenchShell { pub fn advance_epoch(&mut self) { let params = - proof_of_stake::storage::read_pos_params(&self.inner.wl_storage) + proof_of_stake::storage::read_pos_params(&self.inner.state) .unwrap(); - self.wl_storage.storage.block.epoch = - self.wl_storage.storage.block.epoch.next(); - let current_epoch = self.wl_storage.storage.block.epoch; + self.state.in_mem_mut().block.epoch = + self.state.in_mem().block.epoch.next(); + let current_epoch = self.state.in_mem().block.epoch; proof_of_stake::validator_set_update::copy_validator_sets_and_positions( - &mut self.wl_storage, + &mut self.state, ¶ms, current_epoch, current_epoch + params.pipeline_len, ) .unwrap(); + + namada::token::conversion::update_allowed_conversions(&mut self.state) + .unwrap(); } pub fn init_ibc_client_state(&mut self, addr_key: Key) -> ClientId { // Set a dummy header - self.wl_storage - .storage + self.state + .in_mem_mut() .set_header(get_dummy_header()) .unwrap(); // Set client state @@ -450,9 +454,8 @@ impl BenchShell { .unwrap() .into(); let bytes = >::encode_vec(client_state); - self.wl_storage - .storage - .write(&client_state_key, bytes) + self.state + .db_write(&client_state_key, bytes) .expect("write failed"); // Set consensus state @@ -477,10 +480,7 @@ impl BenchShell { let bytes = >::encode_vec(consensus_state); - self.wl_storage - .storage - .write(&consensus_key, bytes) - 
.unwrap(); + self.state.db_write(&consensus_key, bytes).unwrap(); client_id } @@ -506,9 +506,8 @@ impl BenchShell { .unwrap(); let connection_key = connection_key(&NamadaConnectionId::new(1)); - self.wl_storage - .storage - .write(&connection_key, connection.encode_vec()) + self.state + .db_write(&connection_key, connection.encode_vec()) .unwrap(); // Set port @@ -516,19 +515,12 @@ impl BenchShell { let index_key = addr_key .join(&Key::from("capabilities/index".to_string().to_db_key())); - self.wl_storage - .storage - .write(&index_key, 1u64.to_be_bytes()) - .unwrap(); - self.wl_storage - .storage - .write(&port_key, 1u64.to_be_bytes()) - .unwrap(); + self.state.db_write(&index_key, 1u64.to_be_bytes()).unwrap(); + self.state.db_write(&port_key, 1u64.to_be_bytes()).unwrap(); let cap_key = addr_key.join(&Key::from("capabilities/1".to_string().to_db_key())); - self.wl_storage - .storage - .write(&cap_key, PortId::transfer().as_bytes()) + self.state + .db_write(&cap_key, PortId::transfer().as_bytes()) .unwrap(); (addr_key, client_id) @@ -552,22 +544,19 @@ impl BenchShell { .unwrap(); let channel_key = channel_key(&NamadaPortId::transfer(), &NamadaChannelId::new(5)); - self.wl_storage - .storage - .write(&channel_key, channel.encode_vec()) + self.state + .db_write(&channel_key, channel.encode_vec()) .unwrap(); } // Update the block height in state to guarantee a valid response to the // client queries pub fn commit_block(&mut self) { + let last_height = self.inner.state.in_mem().get_last_block_height(); self.inner - .wl_storage - .storage - .begin_block( - Hash::default().into(), - self.inner.wl_storage.storage.get_last_block_height() + 1, - ) + .state + .in_mem_mut() + .begin_block(Hash::default().into(), last_height + 1) .unwrap(); self.inner.commit(); @@ -577,8 +566,8 @@ impl BenchShell { // client queries pub fn commit_masp_tx(&mut self, masp_tx: Tx) { self.last_block_masp_txs - .push((masp_tx, self.wl_storage.write_log.get_keys())); - self.wl_storage.commit_tx(); + 
.push((masp_tx, self.state.write_log().get_keys())); + self.state.commit_tx(); } } @@ -754,7 +743,7 @@ impl Client for BenchShell { }; let ctx = RequestCtx { - wl_storage: &self.wl_storage, + state: &self.state, event_log: self.event_log(), vp_wasm_cache: self.vp_wasm_cache.read_only(), tx_wasm_cache: self.tx_wasm_cache.read_only(), @@ -795,13 +784,12 @@ impl Client for BenchShell { // Given the way we setup and run benchmarks, the masp transactions can // only present in the last block, we can mock the previous // responses with an empty set of transactions - let last_block_txs = if height - == self.inner.wl_storage.storage.get_last_block_height() - { - self.last_block_masp_txs.clone() - } else { - vec![] - }; + let last_block_txs = + if height == self.inner.state.in_mem().get_last_block_height() { + self.last_block_masp_txs.clone() + } else { + vec![] + }; Ok(tendermint_rpc::endpoint::block::Response { block_id: tendermint::block::Id { hash: tendermint::Hash::None, @@ -860,7 +848,7 @@ impl Client for BenchShell { // We can expect all the masp tranfers to have happened only in the last // block let end_block_events = if height.value() - == self.inner.wl_storage.storage.get_last_block_height().0 + == self.inner.state.in_mem().get_last_block_height().0 { Some( self.last_block_masp_txs @@ -913,7 +901,7 @@ impl Client for BenchShell { impl Default for BenchShieldedCtx { fn default() -> Self { - let mut shell = BenchShell::default(); + let shell = BenchShell::default(); let base_dir = shell.tempdir.as_ref().canonicalize().unwrap(); // Create a global config and an empty wallet in the chain dir - this is @@ -983,10 +971,6 @@ impl Default for BenchShieldedCtx { } crate::wallet::save(&chain_ctx.wallet).unwrap(); - namada::token::conversion::update_allowed_conversions( - &mut shell.wl_storage, - ) - .unwrap(); Self { shielded: ShieldedContext::default(), @@ -1020,7 +1004,7 @@ impl BenchShieldedCtx { &[], )) .unwrap(); - let native_token = 
self.shell.wl_storage.storage.native_token.clone(); + let native_token = self.shell.state.in_mem().native_token.clone(); let namada = NamadaImpl::native_new( self.shell, self.wallet, @@ -1034,7 +1018,7 @@ impl BenchShieldedCtx { &namada, &source, &target, - &address::nam(), + &address::testing::nam(), denominated_amount, ), ) @@ -1050,7 +1034,7 @@ impl BenchShieldedCtx { let mut hasher = Sha256::new(); let shielded_section_hash = shielded.clone().map(|transaction| { - namada::types::hash::Hash( + namada::core::hash::Hash( Section::MaspTx(transaction) .hash(&mut hasher) .finalize_reset() @@ -1063,7 +1047,7 @@ impl BenchShieldedCtx { Transfer { source: source.effective_address(), target: target.effective_address(), - token: address::nam(), + token: address::testing::nam(), amount: DenominatedAmount::native(amount), key: None, shielded: shielded_section_hash, diff --git a/crates/apps/src/lib/cli.rs b/crates/apps/src/lib/cli.rs index bba3778d83..e77d38b90e 100644 --- a/crates/apps/src/lib/cli.rs +++ b/crates/apps/src/lib/cli.rs @@ -15,7 +15,7 @@ pub mod wallet; use clap::{ArgGroup, ArgMatches, ColorChoice}; use color_eyre::eyre::Result; -use namada::types::io::StdIo; +use namada::io::StdIo; use utils::*; pub use utils::{safe_exit, Cmd}; @@ -2926,25 +2926,24 @@ pub mod cmds { pub mod args { use std::collections::HashMap; - use std::convert::TryFrom; use std::env; use std::net::SocketAddr; use std::path::PathBuf; use std::str::FromStr; + use namada::core::address::{Address, EstablishedAddress}; + use namada::core::chain::{ChainId, ChainIdPrefix}; + use namada::core::dec::Dec; + use namada::core::ethereum_events::EthAddress; + use namada::core::keccak::KeccakHash; + use namada::core::key::*; + use namada::core::masp::PaymentAddress; + use namada::core::storage::{self, BlockHeight, Epoch}; + use namada::core::time::DateTimeUtc; + use namada::core::token; + use namada::core::token::NATIVE_MAX_DECIMAL_PLACES; use namada::ibc::core::host::types::identifiers::{ChannelId, 
PortId}; use namada::tx::data::GasLimit; - use namada::types::address::{Address, EstablishedAddress}; - use namada::types::chain::{ChainId, ChainIdPrefix}; - use namada::types::dec::Dec; - use namada::types::ethereum_events::EthAddress; - use namada::types::keccak::KeccakHash; - use namada::types::key::*; - use namada::types::masp::PaymentAddress; - use namada::types::storage::{self, BlockHeight, Epoch}; - use namada::types::time::DateTimeUtc; - use namada::types::token; - use namada::types::token::NATIVE_MAX_DECIMAL_PLACES; pub use namada_sdk::args::*; pub use namada_sdk::tx::{ TX_BECOME_VALIDATOR_WASM, TX_BOND_WASM, TX_BRIDGE_POOL_WASM, @@ -6298,7 +6297,7 @@ pub mod args { let find_viewing_key = |w: &mut Wallet| { w.find_viewing_key(&self.viewing_key.raw) - .map(Clone::clone) + .copied() .unwrap_or_else(|_| { eprintln!( "Unknown viewing key {}", diff --git a/crates/apps/src/lib/cli/api.rs b/crates/apps/src/lib/cli/api.rs index 29b271b4d7..bd2a99703e 100644 --- a/crates/apps/src/lib/cli/api.rs +++ b/crates/apps/src/lib/cli/api.rs @@ -1,5 +1,5 @@ +use namada::io::Io; use namada::tendermint_rpc::HttpClient; -use namada::types::io::Io; use namada_sdk::error::Error; use namada_sdk::queries::Client; use namada_sdk::rpc::wait_until_node_is_synched; diff --git a/crates/apps/src/lib/cli/client.rs b/crates/apps/src/lib/cli/client.rs index 828075a80b..566cfab888 100644 --- a/crates/apps/src/lib/cli/client.rs +++ b/crates/apps/src/lib/cli/client.rs @@ -1,6 +1,6 @@ use color_eyre::eyre::Result; use masp_primitives::zip32::ExtendedFullViewingKey; -use namada::types::io::Io; +use namada::io::Io; use namada_sdk::{Namada, NamadaImpl}; use crate::cli; diff --git a/crates/apps/src/lib/cli/context.rs b/crates/apps/src/lib/cli/context.rs index 9380d7057b..2cb586bfa0 100644 --- a/crates/apps/src/lib/cli/context.rs +++ b/crates/apps/src/lib/cli/context.rs @@ -6,14 +6,14 @@ use std::path::{Path, PathBuf}; use std::str::FromStr; use color_eyre::eyre::Result; +use 
namada::core::address::{Address, InternalAddress}; +use namada::core::chain::ChainId; +use namada::core::ethereum_events::EthAddress; +use namada::core::ibc::is_ibc_denom; +use namada::core::key::*; +use namada::core::masp::*; +use namada::io::Io; use namada::ledger::ibc::storage::ibc_token; -use namada::types::address::{Address, InternalAddress}; -use namada::types::chain::ChainId; -use namada::types::ethereum_events::EthAddress; -use namada::types::ibc::is_ibc_denom; -use namada::types::io::Io; -use namada::types::key::*; -use namada::types::masp::*; use namada_sdk::masp::fs::FsShieldedUtils; use namada_sdk::masp::ShieldedContext; use namada_sdk::wallet::Wallet; @@ -476,7 +476,7 @@ impl ArgFromContext for tendermint_rpc::Url { .rpc .laddr .to_string() - .replace("tpc", "http"), + .replace("tcp", "http"), ) .map_err(|err| format!("Invalid Tendermint address: {err}")); } @@ -549,7 +549,7 @@ impl ArgFromMutContext for ExtendedViewingKey { // Or it is a stored alias of one ctx.wallet .find_viewing_key(raw) - .map(Clone::clone) + .copied() .map_err(|_find_err| format!("Unknown viewing key {}", raw)) }) } diff --git a/crates/apps/src/lib/cli/relayer.rs b/crates/apps/src/lib/cli/relayer.rs index e46f568023..37f0cada62 100644 --- a/crates/apps/src/lib/cli/relayer.rs +++ b/crates/apps/src/lib/cli/relayer.rs @@ -1,5 +1,5 @@ use color_eyre::eyre::Result; -use namada::types::io::Io; +use namada::io::Io; use namada_sdk::eth_bridge::{bridge_pool, validator_set}; use crate::cli; diff --git a/crates/apps/src/lib/cli/utils.rs b/crates/apps/src/lib/cli/utils.rs index 0f50a0926a..5faee0318c 100644 --- a/crates/apps/src/lib/cli/utils.rs +++ b/crates/apps/src/lib/cli/utils.rs @@ -380,6 +380,7 @@ pub trait AppExt { /// Extensions for finding matching commands and arguments. /// The functions match commands and arguments defined in [`AppExt`]. 
pub trait ArgMatchesExt { + #[allow(dead_code)] fn args_parse(&self) -> T; } diff --git a/crates/apps/src/lib/cli/wallet.rs b/crates/apps/src/lib/cli/wallet.rs index 2df5d62899..3a1796f20b 100644 --- a/crates/apps/src/lib/cli/wallet.rs +++ b/crates/apps/src/lib/cli/wallet.rs @@ -12,10 +12,10 @@ use ledger_namada_rs::{BIP44Path, NamadaApp}; use ledger_transport_hid::hidapi::HidApi; use ledger_transport_hid::TransportNativeHID; use masp_primitives::zip32::ExtendedFullViewingKey; -use namada::types::address::{Address, DecodeError}; -use namada::types::io::Io; -use namada::types::key::*; -use namada::types::masp::{ExtendedSpendingKey, MaspValue, PaymentAddress}; +use namada::core::address::{Address, DecodeError}; +use namada::core::key::*; +use namada::core::masp::{ExtendedSpendingKey, MaspValue, PaymentAddress}; +use namada::io::Io; use namada_sdk::masp::find_valid_diversifier; use namada_sdk::wallet::{ DecryptionError, DerivationPath, DerivationPathError, FindKeyError, Wallet, diff --git a/crates/apps/src/lib/client/masp.rs b/crates/apps/src/lib/client/masp.rs index ad23a104c5..929f575e6b 100644 --- a/crates/apps/src/lib/client/masp.rs +++ b/crates/apps/src/lib/client/masp.rs @@ -10,7 +10,7 @@ use namada_sdk::masp::{ ShieldedUtils, }; use namada_sdk::queries::Client; -use namada_sdk::types::storage::BlockHeight; +use namada_sdk::storage::BlockHeight; use namada_sdk::{display, display_line, MaybeSend, MaybeSync}; pub async fn syncing< diff --git a/crates/apps/src/lib/client/rpc.rs b/crates/apps/src/lib/client/rpc.rs index a39cfc8252..52f2c56ba4 100644 --- a/crates/apps/src/lib/client/rpc.rs +++ b/crates/apps/src/lib/client/rpc.rs @@ -4,7 +4,6 @@ use std::cmp::Ordering; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::fs::{self, read_dir}; use std::io; -use std::iter::Iterator; use std::str::FromStr; use borsh::BorshDeserialize; @@ -15,6 +14,15 @@ use masp_primitives::merkle_tree::MerklePath; use masp_primitives::sapling::{Node, ViewingKey}; use 
masp_primitives::transaction::components::I128Sum; use masp_primitives::zip32::ExtendedFullViewingKey; +use namada::core::address::{Address, InternalAddress, MASP}; +use namada::core::hash::Hash; +use namada::core::ibc::{is_ibc_denom, IbcTokenHash}; +use namada::core::key::*; +use namada::core::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; +use namada::core::storage::{ + BlockHeight, BlockResults, Epoch, IndexedTx, Key, KeySeg, +}; +use namada::core::token::{Change, MaspDigitPos}; use namada::governance::cli::offline::{ find_offline_proposal, find_offline_votes, read_offline_files, OfflineSignedProposal, OfflineVote, @@ -29,6 +37,7 @@ use namada::governance::storage::proposal::{ use namada::governance::utils::{ compute_proposal_result, ProposalVotes, TallyType, TallyVote, VotePower, }; +use namada::io::Io; use namada::ledger::events::Event; use namada::ledger::ibc::storage::{ ibc_denom_key, ibc_denom_key_prefix, is_ibc_denom_key, @@ -38,16 +47,6 @@ use namada::ledger::pos::types::{CommissionPair, Slash}; use namada::ledger::pos::PosParams; use namada::ledger::queries::RPC; use namada::proof_of_stake::types::{ValidatorState, WeightedValidator}; -use namada::types::address::{Address, InternalAddress, MASP}; -use namada::types::hash::Hash; -use namada::types::ibc::{is_ibc_denom, IbcTokenHash}; -use namada::types::io::Io; -use namada::types::key::*; -use namada::types::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; -use namada::types::storage::{ - BlockHeight, BlockResults, Epoch, IndexedTx, Key, KeySeg, -}; -use namada::types::token::{Change, MaspDigitPos}; use namada::{state as storage, token}; use namada_sdk::error::{ is_pinned_error, Error, PinnedBalanceError, QueryError, @@ -380,7 +379,7 @@ pub async fn query_transparent_balance( args: args::QueryBalance, ) { let prefix = Key::from( - Address::Internal(namada::types::address::InternalAddress::Multitoken) + Address::Internal(namada::core::address::InternalAddress::Multitoken) .to_db_key(), ); 
match (args.token, args.owner) { @@ -1330,8 +1329,8 @@ pub async fn query_proposal_result( false, ); - if proposal.is_ok() { - proposal.unwrap() + if let Ok(proposal) = proposal { + proposal } else { edisplay_line!( context.io(), @@ -1846,16 +1845,17 @@ pub async fn query_bonds( display_line!( context.io(), &mut w; - " Remaining active bond from epoch {}: Δ {}", + " Remaining active bond from epoch {}: Δ {} (slashed {})", bond.start, - bond.amount.to_string_native() + bond.amount.to_string_native(), + bond.slashed_amount.unwrap_or_default().to_string_native() )?; } if !details.bonds_total.is_zero() { display_line!( context.io(), &mut w; - "Active (slashed) bonds total: {}", + "Active (slashable) bonds total: {}", details.bonds_total_active().to_string_native() )?; } @@ -1873,10 +1873,11 @@ pub async fn query_bonds( display_line!( context.io(), &mut w; - " Withdrawable from epoch {} (active from {}): Δ {}", + " Withdrawable from epoch {} (active from {}): Δ {} (slashed {})", unbond.withdraw, unbond.start, - unbond.amount.to_string_native() + unbond.amount.to_string_native(), + unbond.slashed_amount.unwrap_or_default().to_string_native() )?; } display_line!( @@ -1908,6 +1909,12 @@ pub async fn query_bonds( "All bonds total: {}", bonds_and_unbonds.bonds_total.to_string_native() )?; + display_line!( + context.io(), + &mut w; + "All bonds total slashed: {}", + bonds_and_unbonds.bonds_total_slashed.to_string_native() + )?; if bonds_and_unbonds.unbonds_total != bonds_and_unbonds.unbonds_total_slashed @@ -1931,6 +1938,12 @@ pub async fn query_bonds( "All unbonds total withdrawable: {}", bonds_and_unbonds.total_withdrawable.to_string_native() )?; + display_line!( + context.io(), + &mut w; + "All unbonds total slashed: {}", + bonds_and_unbonds.unbonds_total_slashed.to_string_native() + )?; Ok(()) } @@ -1946,7 +1959,6 @@ pub async fn query_bonded_stake( match args.validator { Some(validator) => { - let validator = validator; // Find bonded stake for the given validator let 
stake = get_validator_stake(context.client(), epoch, &validator).await; @@ -2248,7 +2260,6 @@ pub async fn query_and_print_metadata( pub async fn query_slashes(context: &N, args: args::QuerySlashes) { match args.validator { Some(validator) => { - let validator = validator; // Find slashes for the given validator let slashes: Vec = unwrap_client_response::( RPC.vp() diff --git a/crates/apps/src/lib/client/tx.rs b/crates/apps/src/lib/client/tx.rs index 6ed423d0c3..200f5ff04c 100644 --- a/crates/apps/src/lib/client/tx.rs +++ b/crates/apps/src/lib/client/tx.rs @@ -7,6 +7,9 @@ use borsh_ext::BorshSerializeExt; use ledger_namada_rs::{BIP44Path, NamadaApp}; use ledger_transport_hid::hidapi::HidApi; use ledger_transport_hid::TransportNativeHID; +use namada::core::address::{Address, ImplicitAddress}; +use namada::core::dec::Dec; +use namada::core::key::{self, *}; use namada::governance::cli::offline::{ OfflineProposal, OfflineSignedProposal, OfflineVote, }; @@ -15,13 +18,10 @@ use namada::governance::cli::onchain::{ }; use namada::governance::ProposalVote; use namada::ibc::apps::transfer::types::Memo; +use namada::io::Io; use namada::state::EPOCH_SWITCH_BLOCKS_DELAY; use namada::tx::data::pos::{BecomeValidator, ConsensusKeyChange}; use namada::tx::{CompressedSignature, Section, Signer, Tx}; -use namada::types::address::{Address, ImplicitAddress}; -use namada::types::dec::Dec; -use namada::types::io::Io; -use namada::types::key::{self, *}; use namada_sdk::rpc::{InnerTxResult, TxBroadcastData, TxResponse}; use namada_sdk::signing::validate_fee_and_gen_unshield; use namada_sdk::wallet::alias::validator_consensus_key; diff --git a/crates/apps/src/lib/client/utils.rs b/crates/apps/src/lib/client/utils.rs index 7d011d3020..3082200231 100644 --- a/crates/apps/src/lib/client/utils.rs +++ b/crates/apps/src/lib/client/utils.rs @@ -10,12 +10,12 @@ use flate2::read::GzDecoder; use flate2::write::GzEncoder; use flate2::Compression; use itertools::Either; -use 
namada::types::chain::ChainId; -use namada::types::dec::Dec; -use namada::types::key::*; -use namada::types::string_encoding::StringEncoded; -use namada::types::token; -use namada::types::uint::Uint; +use namada::core::chain::ChainId; +use namada::core::dec::Dec; +use namada::core::key::*; +use namada::core::string_encoding::StringEncoded; +use namada::core::token; +use namada::core::uint::Uint; use namada::vm::validate_untrusted_wasm; use namada_sdk::wallet::{alias, Wallet}; use prost::bytes::Bytes; diff --git a/crates/apps/src/lib/config/ethereum_bridge/ledger.rs b/crates/apps/src/lib/config/ethereum_bridge/ledger.rs index 55694439b5..c1117c44b5 100644 --- a/crates/apps/src/lib/config/ethereum_bridge/ledger.rs +++ b/crates/apps/src/lib/config/ethereum_bridge/ledger.rs @@ -1,6 +1,6 @@ //! Runtime configuration for a validator node. #[allow(unused_imports)] -use namada::types::ethereum_events::EthereumEvent; +use namada::core::ethereum_events::EthereumEvent; use serde::{Deserialize, Serialize}; /// Default [Ethereum JSON-RPC](https://ethereum.org/en/developers/docs/apis/json-rpc/) endpoint used by the oracle diff --git a/crates/apps/src/lib/config/genesis.rs b/crates/apps/src/lib/config/genesis.rs index 2b95b00011..bb75d4da76 100644 --- a/crates/apps/src/lib/config/genesis.rs +++ b/crates/apps/src/lib/config/genesis.rs @@ -1,3 +1,4 @@ +#![allow(clippy::non_canonical_partial_ord_impl)] //! 
The parameters used for the chain's genesis pub mod chain; @@ -11,18 +12,19 @@ use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use derivative::Derivative; +use namada::core::address::{Address, EstablishedAddress}; +use namada::core::chain::ProposalBytes; +use namada::core::key::*; +use namada::core::storage; +use namada::core::string_encoding::StringEncoded; +use namada::core::time::{DateTimeUtc, DurationSecs}; +use namada::core::token::Denomination; use namada::governance::parameters::GovernanceParameters; use namada::governance::pgf::parameters::PgfParameters; use namada::ledger::eth_bridge::EthereumBridgeParams; use namada::ledger::parameters::EpochDuration; use namada::ledger::pos::{Dec, GenesisValidator, OwnedPosParams}; -use namada::types::address::{Address, EstablishedAddress}; -use namada::types::chain::ProposalBytes; -use namada::types::key::*; -use namada::types::string_encoding::StringEncoded; -use namada::types::time::{DateTimeUtc, DurationSecs}; -use namada::types::token::Denomination; -use namada::types::{storage, token}; +use namada::token; use serde::{Deserialize, Serialize}; #[cfg(all(any(test, feature = "benches"), not(feature = "integration")))] @@ -220,7 +222,7 @@ pub struct TokenAccount { #[derivative(PartialOrd = "ignore", Ord = "ignore")] pub balances: HashMap, /// Token parameters - pub masp_params: Option, + pub masp_params: Option, /// Token inflation from the last epoch (read + write for every epoch) pub last_inflation: token::Amount, /// Token shielded ratio from the last epoch (read + write for every epoch) @@ -304,13 +306,13 @@ pub fn make_dev_genesis( use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::time::Duration; + use namada::core::address::testing::wnam; + use namada::core::chain::ChainIdPrefix; + use namada::core::ethereum_events::EthAddress; + use namada::core::key::*; use namada::ledger::eth_bridge::{Contracts, UpgradeableContract}; use namada::ledger::pos::types::ValidatorMetaData; use 
namada::tx::standalone_signature; - use namada::types::address::wnam; - use namada::types::chain::ChainIdPrefix; - use namada::types::ethereum_events::EthAddress; - use namada::types::key::*; use namada_sdk::wallet::alias::Alias; use crate::config::genesis::chain::{finalize, DeriveEstablishedAddress}; @@ -392,7 +394,7 @@ pub fn make_dev_genesis( .validator_account .as_ref() .unwrap() - .get(0) + .first() .unwrap(); let genesis_addr = GenesisAddress::EstablishedAddress(tx.tx.data.address.raw.clone()); @@ -400,7 +402,7 @@ pub fn make_dev_genesis( let balance = *nam_balances.0.get(&genesis_addr).unwrap(); let bonded = { let bond = - genesis.transactions.bond.as_mut().unwrap().get(0).unwrap(); + genesis.transactions.bond.as_mut().unwrap().first().unwrap(); bond.amount }; @@ -542,8 +544,8 @@ pub fn make_dev_genesis( #[cfg(test)] pub mod tests { use borsh_ext::BorshSerializeExt; - use namada::types::address::testing::gen_established_address; - use namada::types::key::*; + use namada::core::address::testing::gen_established_address; + use namada::core::key::*; use rand::prelude::ThreadRng; use rand::thread_rng; diff --git a/crates/apps/src/lib/config/genesis/chain.rs b/crates/apps/src/lib/config/genesis/chain.rs index 88a95ec9f5..a17e6fe34b 100644 --- a/crates/apps/src/lib/config/genesis/chain.rs +++ b/crates/apps/src/lib/config/genesis/chain.rs @@ -4,17 +4,17 @@ use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; -use namada::ledger::parameters::EpochDuration; -use namada::types::address::{ +use namada::address::InternalAddress; +use namada::core::address::{ Address, EstablishedAddress, EstablishedAddressGen, }; -use namada::types::chain::{ChainId, ChainIdPrefix}; -use namada::types::dec::Dec; -use namada::types::hash::Hash; -use namada::types::key::{common, RefTo}; -use namada::types::time::{DateTimeUtc, DurationNanos, Rfc3339String}; -use namada::types::token::Amount; -use namada_sdk::types::address::InternalAddress; 
+use namada::core::chain::{ChainId, ChainIdPrefix}; +use namada::core::dec::Dec; +use namada::core::hash::Hash; +use namada::core::key::{common, RefTo}; +use namada::core::time::{DateTimeUtc, DurationNanos, Rfc3339String}; +use namada::core::token::Amount; +use namada::ledger::parameters::EpochDuration; use namada_sdk::wallet::store::AddressVpType; use namada_sdk::wallet::{pre_genesis, Wallet}; use serde::{Deserialize, Serialize}; @@ -310,11 +310,11 @@ impl Finalized { let min_duration: i64 = 60 * 60 * 24 * 365 / (epochs_per_year as i64); let epoch_duration = EpochDuration { min_num_of_blocks, - min_duration: namada::types::time::Duration::seconds(min_duration) + min_duration: namada::core::time::Duration::seconds(min_duration) .into(), }; let max_expected_time_per_block = - namada::types::time::Duration::seconds(max_expected_time_per_block) + namada::core::time::Duration::seconds(max_expected_time_per_block) .into(); let vp_allowlist = vp_allowlist.unwrap_or_default(); let tx_allowlist = tx_allowlist.unwrap_or_default(); @@ -810,7 +810,6 @@ pub struct Metadata { #[cfg(test)] mod test { use std::path::PathBuf; - use std::str::FromStr; use super::*; diff --git a/crates/apps/src/lib/config/genesis/templates.rs b/crates/apps/src/lib/config/genesis/templates.rs index 010d33fb16..696880552a 100644 --- a/crates/apps/src/lib/config/genesis/templates.rs +++ b/crates/apps/src/lib/config/genesis/templates.rs @@ -5,16 +5,17 @@ use std::marker::PhantomData; use std::path::Path; use borsh::{BorshDeserialize, BorshSerialize}; +use namada::core::address::Address; +use namada::core::chain::ProposalBytes; +use namada::core::dec::Dec; +use namada::core::ethereum_structs; +use namada::core::token::{ + Amount, DenominatedAmount, Denomination, NATIVE_MAX_DECIMAL_PLACES, +}; use namada::eth_bridge::storage::parameters::{ Contracts, Erc20WhitelistEntry, MinimumConfirmations, }; -use namada::types::address::Address; -use namada::types::chain::ProposalBytes; -use namada::types::dec::Dec; 
-use namada::types::token::{ - Amount, DenominatedAmount, Denomination, NATIVE_MAX_DECIMAL_PLACES, -}; -use namada::types::{ethereum_structs, token}; +use namada::token; use serde::{Deserialize, Serialize}; use super::transactions::{self, Transactions}; @@ -209,7 +210,7 @@ pub struct Tokens { )] pub struct TokenConfig { pub denom: Denomination, - pub masp_params: Option, + pub masp_params: Option, } #[derive( @@ -947,9 +948,9 @@ mod tests { use std::fs; use std::path::PathBuf; - use namada::types::key; - use namada::types::key::RefTo; - use namada::types::string_encoding::StringEncoded; + use namada::core::key; + use namada::core::key::RefTo; + use namada::core::string_encoding::StringEncoded; use tempfile::tempdir; use super::*; diff --git a/crates/apps/src/lib/config/genesis/transactions.rs b/crates/apps/src/lib/config/genesis/transactions.rs index 2e6ab12932..67a1406709 100644 --- a/crates/apps/src/lib/config/genesis/transactions.rs +++ b/crates/apps/src/lib/config/genesis/transactions.rs @@ -11,22 +11,22 @@ use ledger_namada_rs::NamadaApp; use ledger_transport_hid::hidapi::HidApi; use ledger_transport_hid::TransportNativeHID; use namada::account::AccountPublicKeysMap; +use namada::core::address::{Address, EstablishedAddress}; +use namada::core::chain::ChainId; +use namada::core::dec::Dec; +use namada::core::key::{ + common, ed25519, RefTo, SerializeWithBorsh, SigScheme, +}; +use namada::core::string_encoding::StringEncoded; +use namada::core::time::DateTimeUtc; +use namada::core::token; +use namada::core::token::{DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; use namada::ledger::pos::common::PublicKey; use namada::ledger::pos::types::ValidatorMetaData; use namada::tx::data::{pos, Fee, TxType}; use namada::tx::{ verify_standalone_sig, Code, Commitment, Data, Section, SignatureIndex, Tx, }; -use namada::types::address::{nam, Address, EstablishedAddress}; -use namada::types::chain::ChainId; -use namada::types::dec::Dec; -use namada::types::key::{ - common, 
ed25519, RefTo, SerializeWithBorsh, SigScheme, -}; -use namada::types::string_encoding::StringEncoded; -use namada::types::time::DateTimeUtc; -use namada::types::token; -use namada::types::token::{DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; use namada_sdk::args::Tx as TxArgs; use namada_sdk::signing::{sign_tx, SigningTxData}; use namada_sdk::tx::{TX_BECOME_VALIDATOR_WASM, TX_BOND_WASM}; @@ -79,7 +79,7 @@ fn get_tx_args(use_device: bool) -> TxArgs { wallet_alias_force: false, fee_amount: None, wrapper_fee_payer: None, - fee_token: nam(), + fee_token: genesis_fee_token_address(), fee_unshield: None, gas_limit: Default::default(), expiration: None, @@ -118,13 +118,13 @@ fn get_tx_to_sign(tag: impl AsRef, data: impl BorshSerialize) -> Tx { salt: [0; 8], data: data.serialize_to_vec(), }); - let pk = get_sentinel_pubkey(); + let fee_payer = genesis_fee_payer_pk(); tx.add_wrapper( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: Address::from(&pk), + token: genesis_fee_token_address(), }, - pk, + fee_payer, Default::default(), Default::default(), None, @@ -132,12 +132,17 @@ fn get_tx_to_sign(tag: impl AsRef, data: impl BorshSerialize) -> Tx { tx } -/// Get a dummy public key. 
+/// Get a dummy public key for a fee payer - there are no fees for genesis tx #[inline] -fn get_sentinel_pubkey() -> common::PublicKey { +fn genesis_fee_payer_pk() -> common::PublicKey { common::SecretKey::Ed25519(ed25519::SigScheme::from_bytes([0; 32])).ref_to() } +/// Dummy genesis fee token address - there are no fees for genesis tx +fn genesis_fee_token_address() -> Address { + Address::from(&genesis_fee_payer_pk()) +} + pub struct GenesisValidatorData { pub address: EstablishedAddress, pub commission_rate: Dec, @@ -730,7 +735,7 @@ impl Signed { account_public_keys_map: Some(pks.iter().cloned().collect()), public_keys: pks.clone(), threshold, - fee_payer: get_sentinel_pubkey(), + fee_payer: genesis_fee_payer_pk(), }; let mut tx = self.data.tx_to_sign(); @@ -758,7 +763,7 @@ impl Signed { _parts: HashSet, _user: (), ) -> Result { - if pubkey == get_sentinel_pubkey() { + if pubkey == genesis_fee_payer_pk() { Ok(tx) } else { Err(namada_sdk::error::Error::Other(format!( diff --git a/crates/apps/src/lib/config/genesis/utils.rs b/crates/apps/src/lib/config/genesis/utils.rs index a17f09843d..a67f259db0 100644 --- a/crates/apps/src/lib/config/genesis/utils.rs +++ b/crates/apps/src/lib/config/genesis/utils.rs @@ -4,8 +4,8 @@ use std::path::Path; use eyre::Context; use ledger_namada_rs::NamadaApp; use ledger_transport_hid::TransportNativeHID; +use namada::core::key::common; use namada::tx::Tx; -use namada::types::key::common; use namada_sdk::wallet::Wallet; use namada_sdk::{error, signing}; use serde::de::DeserializeOwned; diff --git a/crates/apps/src/lib/config/global.rs b/crates/apps/src/lib/config/global.rs index 1dc1380635..f6d917c770 100644 --- a/crates/apps/src/lib/config/global.rs +++ b/crates/apps/src/lib/config/global.rs @@ -4,7 +4,7 @@ use std::fs::{create_dir_all, File}; use std::io::Write; use std::path::{Path, PathBuf}; -use namada::types::chain::ChainId; +use namada::core::chain::ChainId; use serde::{Deserialize, Serialize}; use thiserror::Error; diff --git 
a/crates/apps/src/lib/config/mod.rs b/crates/apps/src/lib/config/mod.rs index 6d18586408..782ffee77d 100644 --- a/crates/apps/src/lib/config/mod.rs +++ b/crates/apps/src/lib/config/mod.rs @@ -11,9 +11,9 @@ use std::io::Write; use std::path::{Path, PathBuf}; use directories::ProjectDirs; -use namada::types::chain::ChainId; -use namada::types::storage::BlockHeight; -use namada::types::time::Rfc3339String; +use namada::core::chain::ChainId; +use namada::core::storage::BlockHeight; +use namada::core::time::Rfc3339String; use serde::{Deserialize, Serialize}; use thiserror::Error; @@ -45,7 +45,7 @@ pub struct Config { #[derive(Debug, Serialize, Deserialize)] pub struct ValidatorLocalConfig { pub accepted_gas_tokens: - HashMap, + HashMap, } #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] diff --git a/crates/apps/src/lib/node/ledger/abortable.rs b/crates/apps/src/lib/node/ledger/abortable.rs index ceb4a4c892..8c8a516582 100644 --- a/crates/apps/src/lib/node/ledger/abortable.rs +++ b/crates/apps/src/lib/node/ledger/abortable.rs @@ -1,7 +1,7 @@ use std::future::Future; use std::pin::Pin; -use namada::types::control_flow::{install_shutdown_signal, ShutdownSignal}; +use namada::control_flow::{install_shutdown_signal, ShutdownSignal}; use tokio::sync::mpsc::{self, UnboundedReceiver, UnboundedSender}; use tokio::task::JoinHandle; @@ -77,8 +77,8 @@ impl AbortableSpawner { /// This future will resolve when: /// /// 1. A user sends a shutdown signal (e.g. SIGINT), or... - /// 2. One of the child processes of the ledger terminates, - /// which generates a notification upon dropping an [`Aborter`]. + /// 2. One of the child processes of the ledger terminates, which + /// generates a notification upon dropping an [`Aborter`]. /// /// These two scenarios are represented by the [`AborterStatus`] enum. 
pub async fn wait_for_abort(mut self) -> AborterStatus { diff --git a/crates/apps/src/lib/node/ledger/broadcaster.rs b/crates/apps/src/lib/node/ledger/broadcaster.rs index 42adfed934..c2b6a38096 100644 --- a/crates/apps/src/lib/node/ledger/broadcaster.rs +++ b/crates/apps/src/lib/node/ledger/broadcaster.rs @@ -1,8 +1,8 @@ use std::net::SocketAddr; use std::ops::ControlFlow; -use namada::types::control_flow::time; -use namada::types::time::{DateTimeUtc, Utc}; +use namada::control_flow::time; +use namada::time::{DateTimeUtc, Utc}; use tokio::sync::mpsc::UnboundedReceiver; use crate::facade::tendermint_rpc::{Client, HttpClient}; diff --git a/crates/apps/src/lib/node/ledger/ethereum_oracle/events.rs b/crates/apps/src/lib/node/ledger/ethereum_oracle/events.rs index a750ffbc15..f3e040abbe 100644 --- a/crates/apps/src/lib/node/ledger/ethereum_oracle/events.rs +++ b/crates/apps/src/lib/node/ledger/ethereum_oracle/events.rs @@ -1,4 +1,5 @@ pub mod eth_events { + #![allow(dead_code)] use std::fmt::Debug; use std::str::FromStr; @@ -7,14 +8,14 @@ pub mod eth_events { ValidatorSetUpdateFilter, }; use ethbridge_events::{DynEventCodec, Events as RawEvents}; - use namada::types::address::Address; - use namada::types::ethereum_events::{ + use namada::core::address::Address; + use namada::core::ethereum_events::{ EthAddress, EthereumEvent, TransferToEthereum, TransferToNamada, Uint, }; - use namada::types::ethereum_structs; - use namada::types::hash::Hash; - use namada::types::keccak::KeccakHash; - use namada::types::token::Amount; + use namada::core::ethereum_structs; + use namada::core::hash::Hash; + use namada::core::keccak::KeccakHash; + use namada::core::token::Amount; use num256::Uint256; use thiserror::Error; @@ -119,6 +120,7 @@ pub mod eth_events { } } + #[allow(unused_macros)] macro_rules! 
parse_method { ($name:ident -> $type:ty) => { fn $name(self) -> Result<$type> { @@ -176,7 +178,7 @@ pub mod eth_events { impl Parse for ethabi::Uint { fn parse_amount(self) -> Result { let uint = { - use namada::types::uint::Uint as NamadaUint; + use namada::core::uint::Uint as NamadaUint; let mut num_buf = [0; 32]; self.to_little_endian(&mut num_buf); NamadaUint::from_little_endian(&num_buf) diff --git a/crates/apps/src/lib/node/ledger/ethereum_oracle/mod.rs b/crates/apps/src/lib/node/ledger/ethereum_oracle/mod.rs index b0488226f3..777d133769 100644 --- a/crates/apps/src/lib/node/ledger/ethereum_oracle/mod.rs +++ b/crates/apps/src/lib/node/ledger/ethereum_oracle/mod.rs @@ -8,13 +8,12 @@ use async_trait::async_trait; use ethabi::Address; use ethbridge_events::{event_codecs, EventKind}; use itertools::Either; -use namada::core::hints; +use namada::control_flow::time::{Constant, Duration, Instant, Sleep}; +use namada::core::ethereum_events::EthereumEvent; +use namada::core::{ethereum_structs, hints}; use namada::eth_bridge::ethers; use namada::eth_bridge::ethers::providers::{Http, Middleware, Provider}; use namada::eth_bridge::oracle::config::Config; -use namada::types::control_flow::time::{Constant, Duration, Instant, Sleep}; -use namada::types::ethereum_events::EthereumEvent; -use namada::types::ethereum_structs; use namada_sdk::eth_bridge::{eth_syncing_status_timeout, SyncStatus}; use num256::Uint256; use thiserror::Error; @@ -602,7 +601,7 @@ fn process_queue( pub mod last_processed_block { //! Functionality to do with publishing which blocks we have processed. 
- use namada::types::ethereum_structs; + use namada::core::ethereum_structs; use tokio::sync::watch; pub type Sender = watch::Sender>; @@ -621,11 +620,11 @@ mod test_oracle { use std::num::NonZeroU64; use ethbridge_bridge_events::{TransferToChainFilter, TransferToErcFilter}; + use namada::core::address::testing::gen_established_address; + use namada::core::ethereum_events::{EthAddress, TransferToEthereum}; + use namada::core::hash::Hash; use namada::eth_bridge::ethers::types::H160; use namada::eth_bridge::structs::Erc20Transfer; - use namada::types::address::testing::gen_established_address; - use namada::types::ethereum_events::{EthAddress, TransferToEthereum}; - use namada::types::hash::Hash; use tokio::sync::oneshot::channel; use tokio::time::timeout; diff --git a/crates/apps/src/lib/node/ledger/ethereum_oracle/test_tools/events_endpoint.rs b/crates/apps/src/lib/node/ledger/ethereum_oracle/test_tools/events_endpoint.rs index 650a0ad279..2826bc8dbd 100644 --- a/crates/apps/src/lib/node/ledger/ethereum_oracle/test_tools/events_endpoint.rs +++ b/crates/apps/src/lib/node/ledger/ethereum_oracle/test_tools/events_endpoint.rs @@ -1,7 +1,7 @@ use std::net::SocketAddr; use borsh::BorshDeserialize; -use namada::types::ethereum_events::EthereumEvent; +use namada::core::ethereum_events::EthereumEvent; use tokio::sync::mpsc::Sender as BoundedSender; use tokio::sync::oneshot::{Receiver, Sender}; use warp::reply::WithStatus; diff --git a/crates/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs b/crates/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs index 460e651df5..4d853a9515 100644 --- a/crates/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs +++ b/crates/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs @@ -67,8 +67,8 @@ pub mod mock_web3_client { use async_trait::async_trait; use ethabi::Address; use ethbridge_events::EventCodec; - use namada::types::control_flow::time::{Duration, Instant}; - use 
namada::types::ethereum_structs::BlockHeight; + use namada::control_flow::time::{Duration, Instant}; + use namada::core::ethereum_structs::BlockHeight; use num256::Uint256; use tokio::sync::mpsc::{ unbounded_channel, UnboundedReceiver, UnboundedSender, diff --git a/crates/apps/src/lib/node/ledger/mod.rs b/crates/apps/src/lib/node/ledger/mod.rs index 8075081c45..5c21f687e5 100644 --- a/crates/apps/src/lib/node/ledger/mod.rs +++ b/crates/apps/src/lib/node/ledger/mod.rs @@ -14,11 +14,12 @@ use std::thread; use byte_unit::Byte; use futures::future::TryFutureExt; +use namada::core::storage::Key; +use namada::core::time::DateTimeUtc; use namada::eth_bridge::ethers::providers::{Http, Provider}; use namada::governance::storage::keys as governance_storage; -use namada::types::storage::Key; -use namada::types::time::DateTimeUtc; -use namada_sdk::tendermint::abci::request::CheckTxKind; +use namada::tendermint::abci::request::CheckTxKind; +use namada_sdk::state::StateRead; use once_cell::unsync::Lazy; use sysinfo::{RefreshKind, System, SystemExt}; use tokio::sync::mpsc; @@ -66,16 +67,15 @@ const ENV_VAR_RAYON_THREADS: &str = "NAMADA_RAYON_THREADS"; impl Shell { fn load_proposals(&mut self) { let proposals_key = governance_storage::get_commiting_proposals_prefix( - self.wl_storage.storage.last_epoch.0, + self.state.in_mem().last_epoch.0, ); - let (proposal_iter, _) = - self.wl_storage.storage.iter_prefix(&proposals_key); + let (proposal_iter, _) = self.state.db_iter_prefix(&proposals_key); for (key, _, _) in proposal_iter { let key = Key::from_str(key.as_str()).expect("Key should be parsable"); if governance_storage::get_commit_proposal_epoch(&key).unwrap() - != self.wl_storage.storage.last_epoch.0 + != self.state.in_mem().last_epoch.0 { // NOTE: `iter_prefix` iterate over the matching prefix. 
In this // case a proposal with grace_epoch 110 will be @@ -753,8 +753,8 @@ pub fn test_genesis_files( genesis: config::genesis::chain::Finalized, wasm_dir: PathBuf, ) { + use namada::core::hash::Sha256Hasher; use namada::state::mockdb::MockDB; - use namada::types::hash::Sha256Hasher; // Channels for validators to send protocol txs to be broadcast to the // broadcaster service diff --git a/crates/apps/src/lib/node/ledger/shell/block_alloc.rs b/crates/apps/src/lib/node/ledger/shell/block_alloc.rs index 1fa09fa870..09bb6d7847 100644 --- a/crates/apps/src/lib/node/ledger/shell/block_alloc.rs +++ b/crates/apps/src/lib/node/ledger/shell/block_alloc.rs @@ -56,7 +56,7 @@ pub mod states; use std::marker::PhantomData; use namada::proof_of_stake::pos_queries::PosQueries; -use namada::state::{self, WlStorage}; +use namada::state::{self, WlState}; #[allow(unused_imports)] use crate::facade::tendermint_proto::abci::RequestPrepareProposal; @@ -141,14 +141,14 @@ pub struct BlockAllocator { decrypted_txs: TxBin, } -impl From<&WlStorage> +impl From<&WlState> for BlockAllocator> where D: 'static + state::DB + for<'iter> state::DBIter<'iter>, H: 'static + state::StorageHasher, { #[inline] - fn from(storage: &WlStorage) -> Self { + fn from(storage: &WlState) -> Self { Self::init( storage.pos_queries().get_max_proposal_bytes().get(), namada::parameters::get_max_block_gas(storage).unwrap(), diff --git a/crates/apps/src/lib/node/ledger/shell/block_alloc/states.rs b/crates/apps/src/lib/node/ledger/shell/block_alloc/states.rs index c5e0343ccf..7163cdf877 100644 --- a/crates/apps/src/lib/node/ledger/shell/block_alloc/states.rs +++ b/crates/apps/src/lib/node/ledger/shell/block_alloc/states.rs @@ -6,18 +6,17 @@ //! //! The state machine moves through the following state DAG: //! -//! 1. [`BuildingEncryptedTxBatch`] - the initial state. In -//! this state, we populate a block with DKG encrypted txs. -//! This state supports two modes of operation, which you can -//! 
think of as two sub-states: +//! 1. [`BuildingEncryptedTxBatch`] - the initial state. In this state, we +//! populate a block with DKG encrypted txs. This state supports two modes of +//! operation, which you can think of as two sub-states: //! * [`WithoutEncryptedTxs`] - When this mode is active, no encrypted txs are //! included in a block proposal. //! * [`WithEncryptedTxs`] - When this mode is active, we are able to include //! encrypted txs in a block proposal. -//! 2. [`BuildingDecryptedTxBatch`] - the second state. In -//! this state, we populate a block with DKG decrypted txs. -//! 3. [`BuildingProtocolTxBatch`] - the third state. In -//! this state, we populate a block with protocol txs. +//! 2. [`BuildingDecryptedTxBatch`] - the second state. In this state, we +//! populate a block with DKG decrypted txs. +//! 3. [`BuildingProtocolTxBatch`] - the third state. In this state, we populate +//! a block with protocol txs. mod decrypted_txs; mod encrypted_txs; diff --git a/crates/apps/src/lib/node/ledger/shell/finalize_block.rs b/crates/apps/src/lib/node/ledger/shell/finalize_block.rs index b72539c89e..5c92d3f8db 100644 --- a/crates/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/crates/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -3,30 +3,25 @@ use data_encoding::HEXUPPER; use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; +use namada::core::storage::{BlockHash, BlockResults, Epoch, Header}; use namada::governance::pgf::inflation as pgf_inflation; use namada::ledger::events::EventType; -use namada::ledger::gas::{GasMetering, TxGasMeter}; +use namada::ledger::gas::GasMetering; use namada::ledger::pos::namada_proof_of_stake; -use namada::ledger::protocol::{self, WrapperArgs}; +use namada::ledger::protocol::WrapperArgs; +use namada::proof_of_stake; use namada::proof_of_stake::storage::{ - find_validator_by_raw_hash, read_last_block_proposer_address, - write_last_block_proposer_address, + find_validator_by_raw_hash, 
write_last_block_proposer_address, }; -use namada::state::wl_storage::WriteLogAndStorage; use namada::state::write_log::StorageModification; -use namada::state::{ - ResultExt, StorageRead, StorageWrite, EPOCH_SWITCH_BLOCKS_DELAY, -}; -use namada::token::conversion::update_allowed_conversions; +use namada::state::{ResultExt, StorageWrite, EPOCH_SWITCH_BLOCKS_DELAY}; use namada::tx::data::protocol::ProtocolTxType; -use namada::types::key::tm_raw_hash_to_string; -use namada::types::storage::{BlockHash, BlockResults, Epoch, Header}; use namada::vote_ext::ethereum_events::MultiSignedEthEvent; use namada::vote_ext::ethereum_tx_data_variants; +use namada_sdk::tx::new_tx_event; -use super::governance::execute_governance_proposals; use super::*; -use crate::facade::tendermint::abci::types::{Misbehavior, VoteInfo}; +use crate::facade::tendermint::abci::types::VoteInfo; use crate::node::ledger::shell::stats::InternalStats; impl Shell @@ -61,12 +56,11 @@ where let mut response = shim::response::FinalizeBlock::default(); // Begin the new block and check if a new epoch has begun - let (height, new_epoch) = - self.update_state(req.header, req.hash, req.byzantine_validators); + let (height, new_epoch) = self.update_state(req.header, req.hash); - let (current_epoch, _gas) = self.wl_storage.storage.get_current_epoch(); + let (current_epoch, _gas) = self.state.in_mem().get_current_epoch(); let update_for_tendermint = matches!( - self.wl_storage.storage.update_epoch_blocks_delay, + self.state.in_mem().update_epoch_blocks_delay, Some(EPOCH_SWITCH_BLOCKS_DELAY) ); @@ -85,109 +79,53 @@ where tracing::debug!( "New epoch block delay for updating the Tendermint validator set: \ {:?}", - self.wl_storage.storage.update_epoch_blocks_delay + self.state.in_mem().update_epoch_blocks_delay ); // Finalize the transactions' hashes from the previous block - for hash in self.wl_storage.storage.iter_replay_protection() { - self.wl_storage - .write_log + let (write_log, _in_mem, db) = 
self.state.split_borrow(); + for (raw_key, _, _) in db.iter_replay_protection() { + let hash = raw_key.parse().expect("Failed hash conversion"); + write_log .finalize_tx_hash(hash) .expect("Failed tx hashes finalization") } - let pos_params = - namada_proof_of_stake::storage::read_pos_params(&self.wl_storage)?; - - if new_epoch { - update_allowed_conversions(&mut self.wl_storage)?; - - execute_governance_proposals(self, &mut response)?; - - // Copy the new_epoch + pipeline_len - 1 validator set into - // new_epoch + pipeline_len - namada_proof_of_stake::validator_set_update::copy_validator_sets_and_positions( - &mut self.wl_storage, - &pos_params, - current_epoch, - current_epoch + pos_params.pipeline_len, - )?; - - // Compute the total stake of the consensus validator set and record - // it in storage - namada_proof_of_stake::compute_and_store_total_consensus_stake( - &mut self.wl_storage, - current_epoch, - )?; - } - + let emit_events = &mut response.events; // Get the actual votes from cometBFT in the preferred format - let votes = pos_votes_from_abci(&self.wl_storage, &req.votes); - - // Invariant: Has to be applied before `record_slashes_from_evidence` - // because it potentially needs to be able to read validator state from - // previous epoch and jailing validator removes the historical state - if !votes.is_empty() { - self.log_block_rewards( - votes.clone(), - height, - current_epoch, - new_epoch, - )?; - } - - // Invariant: This has to be applied after - // `copy_validator_sets_and_positions` and before `self.update_epoch`. - self.record_slashes_from_evidence(); - // Invariant: This has to be applied after - // `copy_validator_sets_and_positions` if we're starting a new epoch - if new_epoch { - // Invariant: Process slashes before inflation as they may affect - // the rewards in the current epoch. 
- self.process_slashes(); - self.apply_inflation(current_epoch, &mut response)?; - } - - // Consensus set liveness check - if !votes.is_empty() { - let vote_height = height.prev_height(); - let epoch_of_votes = self - .wl_storage - .storage - .block - .pred_epochs - .get_epoch(vote_height) - .expect( - "Should always find an epoch when looking up the vote \ - height before recording liveness data.", - ); - namada_proof_of_stake::record_liveness_data( - &mut self.wl_storage, - &votes, - epoch_of_votes, - vote_height, - &pos_params, - )?; - } - + let votes = pos_votes_from_abci(&self.state, &req.votes); let validator_set_update_epoch = self.get_validator_set_update_epoch(current_epoch); - // Jail validators for inactivity - namada_proof_of_stake::jail_for_liveness( - &mut self.wl_storage, - &pos_params, - current_epoch, + // Sub-system updates: + // - Governance - applied first in case a proposal changes any of the + // other syb-systems + governance::finalize_block(self, emit_events, new_epoch)?; + // - Token + token::finalize_block(&mut self.state, emit_events, new_epoch)?; + // - PoS + // - Must be applied after governance in case it changes PoS params + proof_of_stake::finalize_block( + &mut self.state, + emit_events, + new_epoch, validator_set_update_epoch, + votes, + req.byzantine_validators, )?; + // Take IBC events that may be emitted from PGF + for ibc_event in self.state.write_log_mut().take_ibc_events() { + let mut event = Event::from(ibc_event.clone()); + // Add the height for IBC event query + let height = self.state.in_mem().get_last_block_height() + 1; + event["height"] = height.to_string(); + response.events.push(event); + } + if new_epoch { - // Prune liveness data from validators that are no longer in the - // consensus set - namada_proof_of_stake::prune_liveness_data( - &mut self.wl_storage, - current_epoch, - )?; + // Apply PoS and PGF inflation + self.apply_inflation(current_epoch)?; } let mut stats = InternalStats::default(); @@ -195,7 +133,7 @@ 
where let native_block_proposer_address = { let tm_raw_hash_string = tm_raw_hash_to_string(req.proposer_address); - find_validator_by_raw_hash(&self.wl_storage, tm_raw_hash_string) + find_validator_by_raw_hash(&self.state, tm_raw_hash_string) .unwrap() .expect( "Unable to find native validator address of block \ @@ -204,7 +142,7 @@ where }; // Tracks the accepted transactions - self.wl_storage.storage.block.results = BlockResults::default(); + self.state.in_mem_mut().block.results = BlockResults::default(); let mut changed_keys = BTreeSet::new(); for (tx_index, processed_tx) in req.txs.iter().enumerate() { let tx = if let Ok(tx) = Tx::try_from(processed_tx.tx.as_ref()) { @@ -224,7 +162,7 @@ where { let mut tx_event = match tx.header().tx_type { TxType::Wrapper(_) | TxType::Protocol(_) => { - Event::new_tx_event(&tx, height.0) + new_tx_event(&tx, height.0) } _ => { tracing::error!( @@ -257,7 +195,7 @@ where if ResultCode::from_u32(processed_tx.result.code).unwrap() != ResultCode::Ok { - let mut tx_event = Event::new_tx_event(&tx, height.0); + let mut tx_event = new_tx_event(&tx, height.0); tx_event["code"] = processed_tx.result.code.to_string(); tx_event["info"] = format!("Tx rejected: {}", &processed_tx.result.info); @@ -266,8 +204,8 @@ where // if the rejected tx was decrypted, remove it // from the queue of txs to be processed if let TxType::Decrypted(_) = &tx_header.tx_type { - self.wl_storage - .storage + self.state + .in_mem_mut() .tx_queue .pop() .expect("Missing wrapper tx in queue"); @@ -279,13 +217,13 @@ where let ( mut tx_event, embedding_wrapper, - mut tx_gas_meter, + tx_gas_meter, wrapper, mut wrapper_args, ) = match &tx_header.tx_type { TxType::Wrapper(wrapper) => { stats.increment_wrapper_txs(); - let tx_event = Event::new_tx_event(&tx, height.0); + let tx_event = new_tx_event(&tx, height.0); let gas_meter = TxGasMeter::new(wrapper.gas_limit); ( tx_event, @@ -301,12 +239,12 @@ where TxType::Decrypted(inner) => { // We remove the corresponding wrapper 
tx from the queue let tx_in_queue = self - .wl_storage - .storage + .state + .in_mem_mut() .tx_queue .pop() .expect("Missing wrapper tx in queue"); - let mut event = Event::new_tx_event(&tx, height.0); + let mut event = new_tx_event(&tx, height.0); match inner { DecryptedTx::Decrypted => { @@ -353,7 +291,7 @@ where | ProtocolTxType::BridgePool | ProtocolTxType::ValSetUpdateVext | ProtocolTxType::ValidatorSetUpdate => ( - Event::new_tx_event(&tx, height.0), + new_tx_event(&tx, height.0), None, TxGasMeter::new_from_sub_limit(0.into()), None, @@ -378,7 +316,7 @@ where } } ( - Event::new_tx_event(&tx, height.0), + new_tx_event(&tx, height.0), None, TxGasMeter::new_from_sub_limit(0.into()), None, @@ -395,7 +333,7 @@ where { let this_signer = &( address, - self.wl_storage.storage.get_last_block_height(), + self.state.in_mem().get_last_block_height(), ); for MultiSignedEthEvent { event, signers } in &digest.events @@ -406,7 +344,7 @@ where } } ( - Event::new_tx_event(&tx, height.0), + new_tx_event(&tx, height.0), None, TxGasMeter::new_from_sub_limit(0.into()), None, @@ -415,8 +353,8 @@ where } }, }; - - let tx_result = protocol::check_tx_allowed(&tx, &self.wl_storage) + let tx_gas_meter = RefCell::new(tx_gas_meter); + let tx_result = protocol::check_tx_allowed(&tx, &self.state) .and_then(|()| { protocol::dispatch_tx( tx, @@ -426,14 +364,15 @@ where .try_into() .expect("transaction index out of bounds"), ), - &mut tx_gas_meter, - &mut self.wl_storage, + &tx_gas_meter, + &mut self.state, &mut self.vp_wasm_cache, &mut self.tx_wasm_cache, wrapper_args.as_mut(), ) }) .map_err(Error::TxApply); + let tx_gas_meter = tx_gas_meter.into_inner(); match tx_result { Ok(result) => { if result.is_accepted() { @@ -450,7 +389,7 @@ where tx_event["is_valid_masp_tx"] = format!("{}", tx_index); } - self.wl_storage.storage.tx_queue.push(TxInQueue { + self.state.in_mem_mut().tx_queue.push(TxInQueue { tx: wrapper.expect("Missing expected wrapper"), gas: tx_gas_meter.get_available_gas(), }); @@ 
-476,11 +415,11 @@ where self.commit_inner_tx_hash(wrapper); } } - self.wl_storage.commit_tx(); + self.state.commit_tx(); if !tx_event.contains_key("code") { tx_event["code"] = ResultCode::Ok.into(); - self.wl_storage - .storage + self.state + .in_mem_mut() .block .results .accept(tx_index); @@ -524,7 +463,7 @@ where } stats.increment_rejected_txs(); - self.wl_storage.drop_tx(); + self.state.drop_tx(); tx_event["code"] = ResultCode::InvalidTx.into(); } tx_event["gas_used"] = result.gas_used.to_string(); @@ -561,7 +500,7 @@ where // hash. A replay of the wrapper is impossible since // the inner tx hash is committed to storage and // we validate the wrapper against that hash too - self.wl_storage + self.state .delete_tx_hash(wrapper.header_hash()) .expect( "Error while deleting tx hash from storage", @@ -570,7 +509,7 @@ where } stats.increment_errored_txs(); - self.wl_storage.drop_tx(); + self.state.drop_tx(); tx_event["gas_used"] = tx_gas_meter.get_tx_consumed_gas().to_string(); @@ -610,14 +549,14 @@ where // Update the MASP commitment tree anchor if the tree was updated let tree_key = token::storage_key::masp_commitment_tree_key(); if let Some(StorageModification::Write { value }) = - self.wl_storage.write_log.read(&tree_key).0 + self.state.write_log().read(&tree_key).0 { let updated_tree = CommitmentTree::::try_from_slice(value) .into_storage_result()?; let anchor_key = token::storage_key::masp_commitment_anchor_key( updated_tree.root(), ); - self.wl_storage.write(&anchor_key, ())?; + self.state.write(&anchor_key, ())?; } if update_for_tendermint { @@ -628,7 +567,7 @@ where } write_last_block_proposer_address( - &mut self.wl_storage, + &mut self.state, native_block_proposer_address, )?; @@ -647,25 +586,22 @@ where &mut self, header: Header, hash: BlockHash, - byzantine_validators: Vec, ) -> (BlockHeight, bool) { - let height = self.wl_storage.storage.get_last_block_height() + 1; + let height = self.state.in_mem().get_last_block_height() + 1; - self.wl_storage - 
.storage + self.state + .in_mem_mut() .begin_block(hash, height) .expect("Beginning a block shouldn't fail"); let header_time = header.time; - self.wl_storage - .storage + self.state + .in_mem_mut() .set_header(header) .expect("Setting a header shouldn't fail"); - self.byzantine_validators = byzantine_validators; - let new_epoch = self - .wl_storage + .state .update_epoch(height, header_time) .expect("Must be able to update epoch"); (height, new_epoch) @@ -694,85 +630,30 @@ where /// account, then update the reward products of the validators. This is /// executed while finalizing the first block of a new epoch and is applied /// with respect to the previous epoch. - fn apply_inflation( - &mut self, - current_epoch: Epoch, - response: &mut shim::response::FinalizeBlock, - ) -> Result<()> { + fn apply_inflation(&mut self, current_epoch: Epoch) -> Result<()> { let last_epoch = current_epoch.prev(); // Get the number of blocks in the last epoch - let first_block_of_last_epoch = self - .wl_storage - .storage - .block - .pred_epochs - .first_block_heights[last_epoch.0 as usize] - .0; + let first_block_of_last_epoch = + self.state.in_mem().block.pred_epochs.first_block_heights + [last_epoch.0 as usize] + .0; let num_blocks_in_last_epoch = - self.wl_storage.storage.block.height.0 - first_block_of_last_epoch; + self.state.in_mem().block.height.0 - first_block_of_last_epoch; // PoS inflation namada_proof_of_stake::rewards::apply_inflation( - &mut self.wl_storage, + &mut self.state, last_epoch, num_blocks_in_last_epoch, )?; // Pgf inflation pgf_inflation::apply_inflation( - &mut self.wl_storage, + self.state.restrict_writes_to_write_log(), namada::ibc::transfer_over_ibc, )?; - for ibc_event in self.wl_storage.write_log_mut().take_ibc_events() { - let mut event = Event::from(ibc_event.clone()); - // Add the height for IBC event query - let height = self.wl_storage.storage.get_last_block_height() + 1; - event["height"] = height.to_string(); - response.events.push(event); - } - 
- Ok(()) - } - // Process the proposer and votes in the block to assign their PoS rewards. - fn log_block_rewards( - &mut self, - votes: Vec, - height: BlockHeight, - current_epoch: Epoch, - new_epoch: bool, - ) -> Result<()> { - // Read the block proposer of the previously committed block in storage - // (n-1 if we are in the process of finalizing n right now). - match read_last_block_proposer_address(&self.wl_storage)? { - Some(proposer_address) => { - tracing::debug!( - "Found last block proposer: {proposer_address}" - ); - namada_proof_of_stake::rewards::log_block_rewards( - &mut self.wl_storage, - if new_epoch { - current_epoch.prev() - } else { - current_epoch - }, - &proposer_address, - votes, - )?; - } - None => { - if height > BlockHeight::default().next_height() { - tracing::error!( - "Can't find the last block proposer at height {height}" - ); - } else { - tracing::debug!( - "No last block proposer at height {height}" - ); - } - } - } Ok(()) } @@ -781,11 +662,11 @@ where // the wrapper). Requires the wrapper transaction as argument to recover // both the hashes. fn commit_inner_tx_hash(&mut self, wrapper_tx: Tx) { - self.wl_storage + self.state .write_tx_hash(wrapper_tx.raw_header_hash()) .expect("Error while writing tx hash to storage"); - self.wl_storage + self.state .delete_tx_hash(wrapper_tx.header_hash()) .expect("Error while deleting tx hash from storage"); } @@ -835,9 +716,7 @@ fn pos_votes_from_abci( ); // Try to convert voting power to u64 - let validator_vp = u64::try_from(*power).expect( - "Must be able to convert voting power from i64 to u64", - ); + let validator_vp = u64::from(*power); Some(namada_proof_of_stake::types::VoteInfo { validator_address, @@ -858,12 +737,18 @@ fn pos_votes_from_abci( /// are covered by the e2e tests. 
#[cfg(test)] mod test_finalize_block { - use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; + use std::collections::{BTreeMap, HashMap, HashSet}; use std::num::NonZeroU64; use std::str::FromStr; - use data_encoding::HEXUPPER; - use namada::core::ledger::replay_protection; + use namada::core::dec::{Dec, POS_DECIMAL_PRECISION}; + use namada::core::ethereum_events::{EthAddress, Uint as ethUint}; + use namada::core::hash::Hash; + use namada::core::keccak::KeccakHash; + use namada::core::key::testing::common_sk_from_simple_seed; + use namada::core::storage::KeySeg; + use namada::core::time::DurationSecs; + use namada::core::uint::Uint; use namada::eth_bridge::storage::bridge_pool::{ self, get_key_from_hash, get_nonce_key, get_signed_root_key, }; @@ -890,34 +775,25 @@ mod test_finalize_block { BondId, SlashType, ValidatorState, WeightedValidator, }; use namada::proof_of_stake::{unjail_validator, ADDRESS as pos_address}; - use namada::state::StorageWrite; + use namada::replay_protection; + use namada::tendermint::abci::types::{Misbehavior, MisbehaviorKind}; use namada::token::{Amount, DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; - use namada::tx::data::{Fee, WrapperTx}; - use namada::tx::{Code, Data, Section, Signature}; - use namada::types::dec::{Dec, POS_DECIMAL_PRECISION}; - use namada::types::ethereum_events::{EthAddress, Uint as ethUint}; - use namada::types::hash::Hash; - use namada::types::keccak::KeccakHash; - use namada::types::key::testing::common_sk_from_simple_seed; - use namada::types::key::tm_consensus_key_raw_hash; - use namada::types::storage::{Epoch, KeySeg}; - use namada::types::time::{DateTimeUtc, DurationSecs}; - use namada::types::uint::Uint; - use namada::vote_ext::{ethereum_events, EthereumTxData}; + use namada::tx::data::Fee; + use namada::tx::{Code, Data, Signature}; + use namada::vote_ext::ethereum_events; use namada_sdk::eth_bridge::MinimumConfirmations; use namada_sdk::governance::ProposalVote; use 
namada_sdk::proof_of_stake::storage::{ liveness_missed_votes_handle, liveness_sum_missed_votes_handle, read_consensus_validator_set_addresses, }; + use namada_sdk::validity_predicate::VpSentinel; use namada_test_utils::tx_data::TxWriteData; use namada_test_utils::TestWasms; use test_log::test; use super::*; - use crate::facade::tendermint::abci::types::{ - Misbehavior, Validator, VoteInfo, - }; + use crate::facade::tendermint::abci::types::Validator; use crate::node::ledger::oracle::control::Command; use crate::node::ledger::shell::test_utils::*; use crate::node::ledger::shims::abcipp_shim_types::shim::request::{ @@ -936,7 +812,7 @@ mod test_finalize_block { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -977,7 +853,7 @@ mod test_finalize_block { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1016,13 +892,12 @@ mod test_finalize_block { // Add unshielded balance for fee paymenty let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&keypair.ref_to()), ); shell - .wl_storage - .storage - .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) + .state + .write(&balance_key, Amount::native_whole(1000)) .unwrap(); // create some wrapper txs @@ -1094,7 +969,7 @@ mod test_finalize_block { amount_per_gas_unit: DenominatedAmount::native( Default::default(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1134,7 +1009,7 @@ mod test_finalize_block { 
assert_eq!(code, &String::from(ResultCode::InvalidTx)); } // check that the corresponding wrapper tx was removed from the queue - assert!(shell.wl_storage.storage.tx_queue.is_empty()); + assert!(shell.state.in_mem().tx_queue.is_empty()); } /// Test that if a tx is undecryptable, it is applied @@ -1148,7 +1023,7 @@ mod test_finalize_block { let wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1186,7 +1061,7 @@ mod test_finalize_block { assert!(log.contains("Transaction could not be decrypted.")) } // check that the corresponding wrapper tx was removed from the queue - assert!(shell.wl_storage.storage.tx_queue.is_empty()); + assert!(shell.state.in_mem().tx_queue.is_empty()); } /// Test that the wrapper txs are queued in the order they @@ -1201,13 +1076,12 @@ mod test_finalize_block { // Add unshielded balance for fee payment let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&keypair.ref_to()), ); shell - .wl_storage - .storage - .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) + .state + .write(&balance_key, Amount::native_whole(1000)) .unwrap(); // create two decrypted txs @@ -1327,7 +1201,7 @@ mod test_finalize_block { // ---- The protocol tx that includes this event on-chain let ext = ethereum_events::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), ethereum_events: vec![event.clone()], validator_addr: address.clone(), } @@ -1338,13 +1212,13 @@ mod test_finalize_block { event, signers: BTreeSet::from([( address.clone(), - shell.wl_storage.storage.get_last_block_height(), + shell.state.in_mem().get_last_block_height(), )]), }; let digest = 
ethereum_events::VextDigest { signatures: vec![( - (address, shell.wl_storage.storage.get_last_block_height()), + (address, shell.state.in_mem().get_last_block_height()), ext.sig, )] .into_iter() @@ -1406,7 +1280,7 @@ mod test_finalize_block { // ---- The protocol tx that includes this event on-chain let ext = ethereum_events::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), ethereum_events: vec![event], validator_addr: address, } @@ -1455,22 +1329,21 @@ mod test_finalize_block { { let (mut shell, _, _, _) = setup_at_height(1u64); namada::eth_bridge::test_utils::commit_bridge_pool_root_at_height( - &mut shell.wl_storage, + &mut shell.state, &KeccakHash([1; 32]), 1.into(), ); let value = BlockHeight(2).serialize_to_vec(); shell - .wl_storage - .storage + .state + .in_mem_mut() .block .tree .update(&get_key_from_hash(&KeccakHash([1; 32])), value) .expect("Test failed"); shell - .wl_storage - .storage - .write(&get_nonce_key(), Uint::from(1).serialize_to_vec()) + .state + .db_write(&get_nonce_key(), Uint::from(1).serialize_to_vec()) .expect("Test failed"); let (tx, action) = craft_tx(&mut shell); let processed_tx = ProcessedTx { @@ -1485,16 +1358,16 @@ mod test_finalize_block { ..Default::default() }; let root = shell - .wl_storage + .state .read_bytes(&get_signed_root_key()) .expect("Reading signed Bridge pool root shouldn't fail."); assert!(root.is_none()); _ = shell.finalize_block(req).expect("Test failed"); - shell.wl_storage.commit_block().unwrap(); + shell.state.commit_block().unwrap(); match action { TestBpAction::VerifySignedRoot => { let (root, _) = shell - .wl_storage + .state .ethbridge_queries() .get_signed_bridge_pool_root() .expect("Test failed"); @@ -1502,10 +1375,8 @@ mod test_finalize_block { assert_eq!(root.data.1, ethUint::from(1)); } TestBpAction::CheckNonceIncremented => { - let nonce = shell - .wl_storage - .ethbridge_queries() - .get_bridge_pool_nonce(); + let 
nonce = + shell.state.ethbridge_queries().get_bridge_pool_nonce(); assert_eq!(nonce, ethUint::from(2)); } } @@ -1528,30 +1399,24 @@ mod test_finalize_block { ); let supply_key = token::storage_key::minted_balance_key(&token); let amt: Amount = 999_999_u64.into(); - shell - .wl_storage - .write(&owner_key, amt) - .expect("Test failed"); - shell - .wl_storage - .write(&supply_key, amt) - .expect("Test failed"); + shell.state.write(&owner_key, amt).expect("Test failed"); + shell.state.write(&supply_key, amt).expect("Test failed"); } // add bertha's gas fees the pool { let amt: Amount = 999_999_u64.into(); let pool_balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &bridge_pool::BRIDGE_POOL_ADDRESS, ); shell - .wl_storage + .state .write(&pool_balance_key, amt) .expect("Test failed"); } // write transfer to storage let transfer = { - use namada::types::eth_bridge_pool::{ + use namada::core::eth_bridge_pool::{ GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, }; @@ -1564,14 +1429,14 @@ mod test_finalize_block { sender: bertha.clone(), }, gas_fee: GasFee { - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), amount: 10u64.into(), payer: bertha.clone(), }, }; let transfer = (&pending).into(); shell - .wl_storage + .state .write(&bridge_pool::get_pending_key(&pending), pending) .expect("Test failed"); transfer @@ -1586,10 +1451,7 @@ mod test_finalize_block { let ext = { let ext = ethereum_events::Vext { validator_addr, - block_height: shell - .wl_storage - .storage - .get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), ethereum_events: vec![ethereum_event], } .sign(&protocol_key); @@ -1628,23 +1490,22 @@ mod test_finalize_block { min_duration: DurationSecs(0), }; namada::ledger::parameters::update_epoch_parameter( - &mut shell.wl_storage, + &mut shell.state, &epoch_duration, ) .unwrap(); - 
shell.wl_storage.storage.next_epoch_min_start_height = BlockHeight(5); - shell.wl_storage.storage.next_epoch_min_start_time = DateTimeUtc::now(); + shell.state.in_mem_mut().next_epoch_min_start_height = BlockHeight(5); + shell.state.in_mem_mut().next_epoch_min_start_time = DateTimeUtc::now(); let txs_key = gen_keypair(); // Add unshielded balance for fee payment let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&txs_key.ref_to()), ); shell - .wl_storage - .storage - .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) + .state + .write(&balance_key, Amount::native_whole(1000)) .unwrap(); // Add a proposal to be executed on next epoch change. @@ -1663,7 +1524,7 @@ mod test_finalize_block { }; namada::governance::init_proposal( - &mut shell.wl_storage, + &mut shell.state, proposal, vec![], None, @@ -1678,8 +1539,7 @@ mod test_finalize_block { }; // Vote to accept the proposal (there's only one validator, so its // vote decides) - namada::governance::vote_proposal(&mut shell.wl_storage, vote) - .unwrap(); + namada::governance::vote_proposal(&mut shell.state, vote).unwrap(); }; // Add a proposal to be accepted and one to be rejected. 
@@ -1687,15 +1547,14 @@ mod test_finalize_block { add_proposal(1, ProposalVote::Nay); // Commit the genesis state - shell.wl_storage.commit_block().unwrap(); + shell.state.commit_block().unwrap(); shell.commit(); // Collect all storage key-vals into a sorted map let store_block_state = |shell: &TestShell| -> BTreeMap<_, _> { shell - .wl_storage - .storage - .db + .state + .db() .iter_prefix(None) .map(|(key, val, _gas)| (key, val)) .collect() @@ -1710,20 +1569,20 @@ mod test_finalize_block { // Keep applying finalize block let validator = shell.mode.get_validator_address().unwrap(); let pos_params = - namada_proof_of_stake::storage::read_pos_params(&shell.wl_storage) + namada_proof_of_stake::storage::read_pos_params(&shell.state) .unwrap(); let consensus_key = namada_proof_of_stake::storage::validator_consensus_key_handle( validator, ) - .get(&shell.wl_storage, Epoch::default(), &pos_params) + .get(&shell.state, Epoch::default(), &pos_params) .unwrap() .unwrap(); let proposer_address = HEXUPPER .decode(consensus_key.tm_raw_hash().as_bytes()) .unwrap(); let val_stake = read_validator_stake( - &shell.wl_storage, + &shell.state, &pos_params, validator, Epoch::default(), @@ -1762,12 +1621,12 @@ mod test_finalize_block { ..Default::default() }; // merkle tree root before finalize_block - let root_pre = shell.shell.wl_storage.storage.block.tree.root(); + let root_pre = shell.shell.state.in_mem().block.tree.root(); let _events = shell.finalize_block(req).unwrap(); // the merkle tree root should not change after finalize_block - let root_post = shell.shell.wl_storage.storage.block.tree.root(); + let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, root_post.0); let new_state = store_block_state(&shell); // The new state must be unchanged @@ -1776,7 +1635,7 @@ mod test_finalize_block { new_state.iter(), ); // Commit the block to move on to the next one - shell.wl_storage.commit_block().unwrap(); + shell.state.commit_block().unwrap(); // Store 
the state after commit for the next iteration last_storage_state = store_block_state(&shell); @@ -1804,14 +1663,14 @@ mod test_finalize_block { let mut validator_set: BTreeSet = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() .into_iter() .collect(); - let params = read_pos_params(&shell.wl_storage).unwrap(); + let params = read_pos_params(&shell.state).unwrap(); let val1 = validator_set.pop_first().unwrap(); let val2 = validator_set.pop_first().unwrap(); @@ -1820,7 +1679,7 @@ mod test_finalize_block { let get_pkh = |address, epoch| { let ck = validator_consensus_key_handle(&address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap() .unwrap(); let hash_string = tm_consensus_key_raw_hash(&ck); @@ -1913,11 +1772,7 @@ mod test_finalize_block { // won't receive votes from TM since we receive votes at a 1-block // delay, so votes will be empty here next_block_for_inflation(&mut shell, pkh1.to_vec(), vec![], None); - assert!( - rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(rewards_accumulator_handle().is_empty(&shell.state).unwrap()); // FINALIZE BLOCK 2. Tell Namada that val1 is the block proposer. // Include votes that correspond to block 1. 
Make val2 the next block's @@ -1928,20 +1783,16 @@ mod test_finalize_block { votes.clone(), None, ); - assert!(rewards_prod_1.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_2.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_3.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_4.is_empty(&shell.wl_storage).unwrap()); - assert!( - !rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(rewards_prod_1.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_2.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_3.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_4.is_empty(&shell.state).unwrap()); + assert!(!rewards_accumulator_handle().is_empty(&shell.state).unwrap()); // Val1 was the proposer, so its reward should be larger than all // others, which should themselves all be equal - let acc_sum = get_rewards_sum(&shell.wl_storage); + let acc_sum = get_rewards_sum(&shell.state); assert!(is_decimal_equal_enough(Dec::one(), acc_sum)); - let acc = get_rewards_acc(&shell.wl_storage); + let acc = get_rewards_acc(&shell.state); assert_eq!(acc.get(&val2.address), acc.get(&val3.address)); assert_eq!(acc.get(&val2.address), acc.get(&val4.address)); assert!( @@ -1951,16 +1802,16 @@ mod test_finalize_block { // FINALIZE BLOCK 3, with val1 as proposer for the next block. 
next_block_for_inflation(&mut shell, pkh1.to_vec(), votes, None); - assert!(rewards_prod_1.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_2.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_3.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_4.is_empty(&shell.wl_storage).unwrap()); + assert!(rewards_prod_1.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_2.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_3.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_4.is_empty(&shell.state).unwrap()); // Val2 was the proposer for this block, so its rewards accumulator // should be the same as val1 now. Val3 and val4 should be equal as // well. - let acc_sum = get_rewards_sum(&shell.wl_storage); + let acc_sum = get_rewards_sum(&shell.state); assert!(is_decimal_equal_enough(Dec::two(), acc_sum)); - let acc = get_rewards_acc(&shell.wl_storage); + let acc = get_rewards_acc(&shell.state); assert_eq!(acc.get(&val1.address), acc.get(&val2.address)); assert_eq!(acc.get(&val3.address), acc.get(&val4.address)); assert!( @@ -2029,13 +1880,13 @@ mod test_finalize_block { votes.clone(), None, ); - assert!(rewards_prod_1.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_2.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_3.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_4.is_empty(&shell.wl_storage).unwrap()); - let acc_sum = get_rewards_sum(&shell.wl_storage); + assert!(rewards_prod_1.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_2.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_3.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_4.is_empty(&shell.state).unwrap()); + let acc_sum = get_rewards_sum(&shell.state); assert!(is_decimal_equal_enough(Dec::new(3, 0).unwrap(), acc_sum)); - let acc = get_rewards_acc(&shell.wl_storage); + let acc = get_rewards_acc(&shell.state); assert!( acc.get(&val1.address).cloned().unwrap() > acc.get(&val2.address).cloned().unwrap() @@ 
-2052,15 +1903,12 @@ mod test_finalize_block { // Advance to the start of epoch 1. Val1 is the only block proposer for // the rest of the epoch. Val4 does not vote for the rest of the epoch. let height_of_next_epoch = - shell.wl_storage.storage.next_epoch_min_start_height; + shell.state.in_mem().next_epoch_min_start_height; let current_height = 4_u64; - assert_eq!(current_height, shell.wl_storage.storage.block.height.0); + assert_eq!(current_height, shell.state.in_mem().block.height.0); for _ in current_height..height_of_next_epoch.0 + 2 { - dbg!( - get_rewards_acc(&shell.wl_storage), - get_rewards_sum(&shell.wl_storage), - ); + dbg!(get_rewards_acc(&shell.state), get_rewards_sum(&shell.state)); next_block_for_inflation( &mut shell, pkh1.to_vec(), @@ -2068,25 +1916,21 @@ mod test_finalize_block { None, ); } - assert!( - rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(rewards_accumulator_handle().is_empty(&shell.state).unwrap()); let rp1 = rewards_prod_1 - .get(&shell.wl_storage, &Epoch::default()) + .get(&shell.state, &Epoch::default()) .unwrap() .unwrap(); let rp2 = rewards_prod_2 - .get(&shell.wl_storage, &Epoch::default()) + .get(&shell.state, &Epoch::default()) .unwrap() .unwrap(); let rp3 = rewards_prod_3 - .get(&shell.wl_storage, &Epoch::default()) + .get(&shell.state, &Epoch::default()) .unwrap() .unwrap(); let rp4 = rewards_prod_4 - .get(&shell.wl_storage, &Epoch::default()) + .get(&shell.state, &Epoch::default()) .unwrap() .unwrap(); assert!(rp1 > rp2); @@ -2105,20 +1949,20 @@ mod test_finalize_block { let mut validator_set: BTreeSet = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() .into_iter() .collect(); - let params = read_pos_params(&shell.wl_storage).unwrap(); + let params = read_pos_params(&shell.state).unwrap(); let validator = validator_set.pop_first().unwrap(); let get_pkh = |address, epoch| { let ck = 
validator_consensus_key_handle(&address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap() .unwrap(); let hash_string = tm_consensus_key_raw_hash(&ck); @@ -2160,11 +2004,7 @@ mod test_finalize_block { // won't receive votes from TM since we receive votes at a 1-block // delay, so votes will be empty here next_block_for_inflation(&mut shell, pkh1.to_vec(), vec![], None); - assert!( - rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(rewards_accumulator_handle().is_empty(&shell.state).unwrap()); let (current_epoch, inflation) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -2172,7 +2012,7 @@ mod test_finalize_block { // Query the available rewards let query_rewards = namada_proof_of_stake::query_reward_tokens( - &shell.wl_storage, + &shell.state, None, &validator.address, current_epoch, @@ -2181,7 +2021,7 @@ mod test_finalize_block { // Claim the rewards from the initial epoch let reward_1 = namada_proof_of_stake::claim_reward_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, current_epoch, @@ -2194,7 +2034,7 @@ mod test_finalize_block { // Query the available rewards again and check that it is 0 now after // the claim let query_rewards = namada_proof_of_stake::query_reward_tokens( - &shell.wl_storage, + &shell.state, None, &validator.address, current_epoch, @@ -2210,7 +2050,7 @@ mod test_finalize_block { None, ); let att = namada_proof_of_stake::claim_reward_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, current_epoch, @@ -2226,7 +2066,7 @@ mod test_finalize_block { // Unbond some tokens let unbond_amount = token::Amount::native_whole(50_000); let unbond_res = namada_proof_of_stake::unbond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, unbond_amount, @@ -2238,7 +2078,7 @@ mod test_finalize_block { // Query the available rewards let query_rewards = namada_proof_of_stake::query_reward_tokens( - 
&shell.wl_storage, + &shell.state, None, &validator.address, current_epoch, @@ -2246,7 +2086,7 @@ mod test_finalize_block { .unwrap(); let rew = namada_proof_of_stake::claim_reward_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, current_epoch, @@ -2260,13 +2100,13 @@ mod test_finalize_block { let withdraw_epoch = current_epoch + params.withdrawable_epoch_offset(); let last_claim_epoch = namada_proof_of_stake::storage::get_last_reward_claim_epoch( - &shell.wl_storage, + &shell.state, &validator.address, &validator.address, ) .unwrap(); let bond_amounts = namada_proof_of_stake::bond_amounts_for_rewards( - &shell.wl_storage, + &shell.state, &bond_id, last_claim_epoch.unwrap_or_default(), withdraw_epoch, @@ -2291,8 +2131,8 @@ mod test_finalize_block { let mut missed_rewards = token::Amount::zero(); while current_epoch < withdraw_epoch { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (new_epoch, inflation) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -2306,7 +2146,7 @@ mod test_finalize_block { // Withdraw tokens let withdraw_amount = namada_proof_of_stake::withdraw_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, current_epoch, @@ -2316,7 +2156,7 @@ mod test_finalize_block { // Query the available rewards let query_rewards = namada_proof_of_stake::query_reward_tokens( - &shell.wl_storage, + &shell.state, None, &validator.address, current_epoch, @@ -2325,7 +2165,7 @@ mod test_finalize_block { // Claim tokens let reward_2 = namada_proof_of_stake::claim_reward_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, current_epoch, @@ -2346,7 +2186,7 @@ mod test_finalize_block { // Query the available rewards to check that they are 0 let query_rewards = namada_proof_of_stake::query_reward_tokens( - &shell.wl_storage, + &shell.state, None, &validator.address, current_epoch, @@ 
-2366,27 +2206,27 @@ mod test_finalize_block { let mut validator_set: BTreeSet = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() .into_iter() .collect(); - let params = read_pos_params(&shell.wl_storage).unwrap(); + let params = read_pos_params(&shell.state).unwrap(); let validator = validator_set.pop_first().unwrap(); let commission_rate = namada_proof_of_stake::storage::validator_commission_rate_handle( &validator.address, ) - .get(&shell.wl_storage, Epoch(0), ¶ms) + .get(&shell.state, Epoch(0), ¶ms) .unwrap() .unwrap(); let get_pkh = |address, epoch| { let ck = validator_consensus_key_handle(&address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap() .unwrap(); let hash_string = tm_consensus_key_raw_hash(&ck); @@ -2412,26 +2252,22 @@ mod test_finalize_block { // won't receive votes from TM since we receive votes at a 1-block // delay, so votes will be empty here next_block_for_inflation(&mut shell, pkh1.clone(), vec![], None); - assert!( - rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(rewards_accumulator_handle().is_empty(&shell.state).unwrap()); // Make an account with balance and delegate some tokens let delegator = address::testing::gen_implicit_address(); let del_amount = init_stake; - let staking_token = shell.wl_storage.storage.native_token.clone(); + let staking_token = shell.state.in_mem().native_token.clone(); namada::token::credit_tokens( - &mut shell.wl_storage, + &mut shell.state, &staking_token, &delegator, 2 * init_stake, ) .unwrap(); - let mut current_epoch = shell.wl_storage.storage.block.epoch; + let mut current_epoch = shell.state.in_mem().block.epoch; namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, Some(&delegator), &validator.address, del_amount, @@ -2443,8 +2279,8 @@ mod test_finalize_block { // Advance to pipeline epoch for _ in 0..params.pipeline_len { let votes = 
get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (new_epoch, inflation) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -2454,7 +2290,7 @@ mod test_finalize_block { // Claim the rewards for the validator for the first two epochs let val_reward_1 = namada_proof_of_stake::claim_reward_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, current_epoch, @@ -2470,8 +2306,8 @@ mod test_finalize_block { // Go to the next epoch, where now the delegator's stake has been active // for an epoch let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (new_epoch, inflation_3) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -2480,7 +2316,7 @@ mod test_finalize_block { // Claim again for the validator let val_reward_2 = namada_proof_of_stake::claim_reward_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, current_epoch, @@ -2489,7 +2325,7 @@ mod test_finalize_block { // Claim for the delegator let del_reward_1 = namada_proof_of_stake::claim_reward_tokens( - &mut shell.wl_storage, + &mut shell.state, Some(&delegator), &validator.address, current_epoch, @@ -2529,15 +2365,15 @@ mod test_finalize_block { let mut validators: BTreeSet = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() .into_iter() .collect(); - let params = read_pos_params(&shell.wl_storage).unwrap(); - let mut current_epoch = shell.wl_storage.storage.block.epoch; + let params = read_pos_params(&shell.state).unwrap(); + let mut current_epoch = shell.state.in_mem().block.epoch; let validator1 = validators.pop_first().unwrap(); let validator2 = validators.pop_first().unwrap(); @@ -2546,23 +2382,23 @@ mod test_finalize_block { let init_stake = validator1.bonded_stake; // Give the validators some 
tokens for txs - let staking_token = shell.wl_storage.storage.native_token.clone(); + let staking_token = shell.state.in_mem().native_token.clone(); namada::token::credit_tokens( - &mut shell.wl_storage, + &mut shell.state, &staking_token, &validator1.address, init_stake, ) .unwrap(); namada::token::credit_tokens( - &mut shell.wl_storage, + &mut shell.state, &staking_token, &validator2.address, init_stake, ) .unwrap(); namada::token::credit_tokens( - &mut shell.wl_storage, + &mut shell.state, &staking_token, &validator3.address, init_stake, @@ -2571,7 +2407,7 @@ mod test_finalize_block { let get_pkh = |address, epoch| { let ck = validator_consensus_key_handle(&address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap() .unwrap(); let hash_string = tm_consensus_key_raw_hash(&ck); @@ -2583,18 +2419,12 @@ mod test_finalize_block { // won't receive votes from TM since we receive votes at a 1-block // delay, so votes will be empty here next_block_for_inflation(&mut shell, pkh1.clone(), vec![], None); - assert!( - rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(rewards_accumulator_handle().is_empty(&shell.state).unwrap()); // Check that there's 3 unique consensus keys let consensus_keys = - namada_proof_of_stake::storage::get_consensus_key_set( - &shell.wl_storage, - ) - .unwrap(); + namada_proof_of_stake::storage::get_consensus_key_set(&shell.state) + .unwrap(); assert_eq!(consensus_keys.len(), 3); // let ck1 = validator_consensus_key_handle(&validator) // .get(&storage, current_epoch, ¶ms) @@ -2608,7 +2438,7 @@ mod test_finalize_block { // Validator1 bonds 1 NAM let bond_amount = token::Amount::native_whole(1); namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator1.address, bond_amount, @@ -2620,7 +2450,7 @@ mod test_finalize_block { // Validator2 changes consensus key let new_ck2 = common_sk_from_simple_seed(1).ref_to(); 
namada_proof_of_stake::change_consensus_key( - &mut shell.wl_storage, + &mut shell.state, &validator2.address, &new_ck2, current_epoch, @@ -2629,7 +2459,7 @@ mod test_finalize_block { // Validator3 bonds 1 NAM and changes consensus key namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator3.address, bond_amount, @@ -2639,7 +2469,7 @@ mod test_finalize_block { .unwrap(); let new_ck3 = common_sk_from_simple_seed(2).ref_to(); namada_proof_of_stake::change_consensus_key( - &mut shell.wl_storage, + &mut shell.state, &validator3.address, &new_ck3, current_epoch, @@ -2648,17 +2478,15 @@ mod test_finalize_block { // Check that there's 5 unique consensus keys let consensus_keys = - namada_proof_of_stake::storage::get_consensus_key_set( - &shell.wl_storage, - ) - .unwrap(); + namada_proof_of_stake::storage::get_consensus_key_set(&shell.state) + .unwrap(); assert_eq!(consensus_keys.len(), 5); // Advance to pipeline epoch for _ in 0..params.pipeline_len { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (new_epoch, _inflation) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -2666,7 +2494,7 @@ mod test_finalize_block { } let consensus_vals = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, current_epoch, ) .unwrap(); @@ -2691,7 +2519,7 @@ mod test_finalize_block { // Val 1 changes consensus key let new_ck1 = common_sk_from_simple_seed(3).ref_to(); namada_proof_of_stake::change_consensus_key( - &mut shell.wl_storage, + &mut shell.state, &validator1.address, &new_ck1, current_epoch, @@ -2700,7 +2528,7 @@ mod test_finalize_block { // Val 2 is fully unbonded namada_proof_of_stake::unbond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator2.address, init_stake, @@ -2711,7 +2539,7 @@ mod test_finalize_block { // Val 3 is fully unbonded and changes consensus key 
namada_proof_of_stake::unbond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator3.address, init_stake + bond_amount, @@ -2721,7 +2549,7 @@ mod test_finalize_block { .unwrap(); let new2_ck3 = common_sk_from_simple_seed(4).ref_to(); namada_proof_of_stake::change_consensus_key( - &mut shell.wl_storage, + &mut shell.state, &validator1.address, &new2_ck3, current_epoch, @@ -2730,17 +2558,15 @@ mod test_finalize_block { // Check that there's 7 unique consensus keys let consensus_keys = - namada_proof_of_stake::storage::get_consensus_key_set( - &shell.wl_storage, - ) - .unwrap(); + namada_proof_of_stake::storage::get_consensus_key_set(&shell.state) + .unwrap(); assert_eq!(consensus_keys.len(), 7); // Advance to pipeline epoch for _ in 0..params.pipeline_len { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (new_epoch, _inflation) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -2748,7 +2574,7 @@ mod test_finalize_block { } let consensus_vals = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, current_epoch, ) .unwrap(); @@ -2765,7 +2591,7 @@ mod test_finalize_block { // Val2 bonds 1 NAM and changes consensus key namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator2.address, bond_amount, @@ -2775,7 +2601,7 @@ mod test_finalize_block { .unwrap(); let new2_ck2 = common_sk_from_simple_seed(5).ref_to(); namada_proof_of_stake::change_consensus_key( - &mut shell.wl_storage, + &mut shell.state, &validator2.address, &new2_ck2, current_epoch, @@ -2784,7 +2610,7 @@ mod test_finalize_block { // Val3 bonds 1 NAM namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator3.address, bond_amount, @@ -2795,17 +2621,15 @@ mod test_finalize_block { // Check that there's 8 unique consensus keys let consensus_keys = - 
namada_proof_of_stake::storage::get_consensus_key_set( - &shell.wl_storage, - ) - .unwrap(); + namada_proof_of_stake::storage::get_consensus_key_set(&shell.state) + .unwrap(); assert_eq!(consensus_keys.len(), 8); // Advance to pipeline epoch for _ in 0..params.pipeline_len { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (new_epoch, _inflation) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -2813,7 +2637,7 @@ mod test_finalize_block { } let consensus_vals = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, current_epoch, ) .unwrap(); @@ -2871,7 +2695,7 @@ mod test_finalize_block { replay_protection::last_key(&wrapper_tx.header_hash()); // merkle tree root before finalize_block - let root_pre = shell.shell.wl_storage.storage.block.tree.root(); + let root_pre = shell.shell.state.in_mem().block.tree.root(); let event = &shell .finalize_block(FinalizeBlock { @@ -2891,15 +2715,15 @@ mod test_finalize_block { assert_eq!(code, String::from(ResultCode::Ok).as_str()); // the merkle tree root should not change after finalize_block - let root_post = shell.shell.wl_storage.storage.block.tree.root(); + let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, root_post.0); // Check transaction's hash in storage assert!( shell .shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry(&wrapper_tx.header_hash()) .unwrap_or_default() ); @@ -2907,8 +2731,8 @@ mod test_finalize_block { assert!( !shell .shell - .wl_storage - .storage + .state + .in_mem() .block .tree .has_key(&wrapper_hash_key) @@ -2923,14 +2747,14 @@ mod test_finalize_block { let (mut shell, _, _, _) = setup(); let keypair = gen_keypair(); let keypair_2 = gen_keypair(); - let mut batch = namada::state::testing::TestStorage::batch(); + let mut batch = namada::state::testing::TestState::batch(); let tx_code = 
TestWasms::TxNoOp.read_bytes(); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2947,7 +2771,7 @@ mod test_finalize_block { new_wrapper.update_header(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair_2.ref_to(), Epoch(0), @@ -2976,8 +2800,7 @@ mod test_finalize_block { for tx in [&wrapper, &new_wrapper] { let hash_subkey = replay_protection::last_key(&tx.header_hash()); shell - .wl_storage - .storage + .state .write_replay_protection_entry(&mut batch, &hash_subkey) .expect("Test failed"); } @@ -2996,7 +2819,7 @@ mod test_finalize_block { shell.enqueue_tx(wrapper.clone(), GAS_LIMIT_MULTIPLIER.into()); shell.enqueue_tx(new_wrapper.clone(), GAS_LIMIT_MULTIPLIER.into()); // merkle tree root before finalize_block - let root_pre = shell.shell.wl_storage.storage.block.tree.root(); + let root_pre = shell.shell.state.in_mem().block.tree.root(); let event = &shell .finalize_block(FinalizeBlock { @@ -3006,7 +2829,7 @@ mod test_finalize_block { .expect("Test failed"); // the merkle tree root should not change after finalize_block - let root_post = shell.shell.wl_storage.storage.block.tree.root(); + let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, root_post.0); assert_eq!(event[0].event_type.to_string(), String::from("applied")); @@ -3019,15 +2842,15 @@ mod test_finalize_block { for (inner, wrapper) in [(inner, wrapper), (new_inner, new_wrapper)] { assert!( shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry(&inner.raw_header_hash()) .unwrap_or_default() ); assert!( !shell - .wl_storage - .write_log + .state + 
.write_log() .has_replay_protection_entry(&wrapper.header_hash()) .unwrap_or_default() ); @@ -3042,7 +2865,7 @@ mod test_finalize_block { fn test_tx_hash_handling() { let (mut shell, _, _, _) = setup(); let keypair = gen_keypair(); - let mut batch = namada::state::testing::TestStorage::batch(); + let mut batch = namada::state::testing::TestState::batch(); let (out_of_gas_wrapper, _) = mk_wrapper_tx(&shell, &keypair); let (undecryptable_wrapper, _) = mk_wrapper_tx(&shell, &keypair); @@ -3057,7 +2880,7 @@ mod test_finalize_block { amount_per_gas_unit: DenominatedAmount::native( Amount::zero(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -3119,8 +2942,7 @@ mod test_finalize_block { let hash_subkey = replay_protection::last_key(&wrapper.header_hash()); shell - .wl_storage - .storage + .state .write_replay_protection_entry(&mut batch, &hash_subkey) .unwrap(); } @@ -3154,7 +2976,7 @@ mod test_finalize_block { ); shell.enqueue_tx(failing_wrapper.clone(), GAS_LIMIT_MULTIPLIER.into()); // merkle tree root before finalize_block - let root_pre = shell.shell.wl_storage.storage.block.tree.root(); + let root_pre = shell.shell.state.in_mem().block.tree.root(); let event = &shell .finalize_block(FinalizeBlock { @@ -3164,7 +2986,7 @@ mod test_finalize_block { .expect("Test failed"); // the merkle tree root should not change after finalize_block - let root_post = shell.shell.wl_storage.storage.block.tree.root(); + let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, root_post.0); assert_eq!(event[0].event_type.to_string(), String::from("applied")); @@ -3191,8 +3013,8 @@ mod test_finalize_block { ] { assert!( !shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry( &invalid_inner.raw_header_hash() ) @@ -3200,23 +3022,22 @@ mod test_finalize_block { ); assert!( shell - .wl_storage - .storage + .state 
.has_replay_protection_entry(&valid_wrapper.header_hash()) .unwrap_or_default() ); } assert!( shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry(&failing_inner.raw_header_hash()) .expect("test failed") ); assert!( !shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry(&failing_wrapper.header_hash()) .unwrap_or_default() ); @@ -3234,7 +3055,7 @@ mod test_finalize_block { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -3264,7 +3085,7 @@ mod test_finalize_block { }, }]; // merkle tree root before finalize_block - let root_pre = shell.shell.wl_storage.storage.block.tree.root(); + let root_pre = shell.shell.state.in_mem().block.tree.root(); let event = &shell .finalize_block(FinalizeBlock { @@ -3274,7 +3095,7 @@ mod test_finalize_block { .expect("Test failed"); // the merkle tree root should not change after finalize_block - let root_post = shell.shell.wl_storage.storage.block.tree.root(); + let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, root_post.0); assert_eq!(event[0].event_type.to_string(), String::from("accepted")); @@ -3287,15 +3108,15 @@ mod test_finalize_block { assert!( shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry(&wrapper_hash) .unwrap_or_default() ); assert!( !shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry(&wrapper.raw_header_hash()) .unwrap_or_default() ); @@ -3313,7 +3134,7 @@ mod test_finalize_block { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(100.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ 
-3351,14 +3172,11 @@ mod test_finalize_block { let code = event.attributes.get("code").expect("Testfailed").as_str(); assert_eq!(code, String::from(ResultCode::InvalidTx).as_str()); let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&keypair.to_public()), ); - let balance: Amount = shell - .wl_storage - .read(&balance_key) - .unwrap() - .unwrap_or_default(); + let balance: Amount = + shell.state.read(&balance_key).unwrap().unwrap_or_default(); assert_eq!(balance, 0.into()) } @@ -3371,13 +3189,13 @@ mod test_finalize_block { let validator = shell.mode.get_validator_address().unwrap().to_owned(); let pos_params = - namada_proof_of_stake::storage::read_pos_params(&shell.wl_storage) + namada_proof_of_stake::storage::read_pos_params(&shell.state) .unwrap(); let consensus_key = namada_proof_of_stake::storage::validator_consensus_key_handle( &validator, ) - .get(&shell.wl_storage, Epoch::default(), &pos_params) + .get(&shell.state, Epoch::default(), &pos_params) .unwrap() .unwrap(); let proposer_address = HEXUPPER @@ -3385,8 +3203,8 @@ mod test_finalize_block { .unwrap(); let proposer_balance = namada::token::read_balance( - &shell.wl_storage, - &shell.wl_storage.storage.native_token, + &shell.state, + &shell.state.in_mem().native_token, &validator, ) .unwrap(); @@ -3399,7 +3217,7 @@ mod test_finalize_block { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -3423,13 +3241,13 @@ mod test_finalize_block { let fee_amount = namada::token::denom_to_amount( fee_amount, &wrapper.header().wrapper().unwrap().fee.token, - &shell.wl_storage, + &shell.state, ) .unwrap(); let signer_balance = namada::token::read_balance( - &shell.wl_storage, - 
&shell.wl_storage.storage.native_token, + &shell.state, + &shell.state.in_mem().native_token, &wrapper.header().wrapper().unwrap().fee_payer(), ) .unwrap(); @@ -3456,8 +3274,8 @@ mod test_finalize_block { assert_eq!(code, String::from(ResultCode::Ok).as_str()); let new_proposer_balance = namada::token::read_balance( - &shell.wl_storage, - &shell.wl_storage.storage.native_token, + &shell.state, + &shell.state.in_mem().native_token, &validator, ) .unwrap(); @@ -3467,8 +3285,8 @@ mod test_finalize_block { ); let new_signer_balance = namada::token::read_balance( - &shell.wl_storage, - &shell.wl_storage.storage.native_token, + &shell.state, + &shell.state.in_mem().native_token, &wrapper.header().wrapper().unwrap().fee_payer(), ) .unwrap(); @@ -3486,13 +3304,13 @@ mod test_finalize_block { num_validators, ..Default::default() }); - let mut params = read_pos_params(&shell.wl_storage).unwrap(); + let mut params = read_pos_params(&shell.state).unwrap(); params.owned.unbonding_len = 4; - write_pos_params(&mut shell.wl_storage, ¶ms.owned)?; + write_pos_params(&mut shell.state, ¶ms.owned)?; let validator_set: Vec = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() @@ -3507,7 +3325,7 @@ mod test_finalize_block { let get_pkh = |address, epoch| { let ck = validator_consensus_key_handle(&address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap() .unwrap(); let hash_string = tm_consensus_key_raw_hash(&ck); @@ -3522,7 +3340,7 @@ mod test_finalize_block { // Every validator should be in the consensus set assert_eq!( validator_state_handle(&validator.address) - .get(&shell.wl_storage, Epoch::default(), ¶ms) + .get(&shell.state, Epoch::default(), ¶ms) .unwrap(), Some(ValidatorState::Consensus) ); @@ -3540,8 +3358,8 @@ mod test_finalize_block { next_block_for_inflation(&mut shell, pkh1.to_vec(), vec![], None); let votes = get_default_true_votes( - &shell.wl_storage, - 
shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); assert!(!votes.is_empty()); assert_eq!(votes.len(), 7_usize); @@ -3578,7 +3396,7 @@ mod test_finalize_block { Some(byzantine_validators), ); - let processing_epoch = shell.wl_storage.storage.block.epoch + let processing_epoch = shell.state.in_mem().block.epoch + params.unbonding_len + 1_u64 + params.cubic_slashing_window_length; @@ -3587,60 +3405,60 @@ mod test_finalize_block { // are properly updated assert_eq!( validator_state_handle(&val1.address) - .get(&shell.wl_storage, Epoch::default(), ¶ms) + .get(&shell.state, Epoch::default(), ¶ms) .unwrap(), Some(ValidatorState::Consensus) ); assert_eq!( validator_state_handle(&val2.address) - .get(&shell.wl_storage, Epoch::default(), ¶ms) + .get(&shell.state, Epoch::default(), ¶ms) .unwrap(), Some(ValidatorState::Consensus) ); assert!( enqueued_slashes_handle() .at(&Epoch::default()) - .is_empty(&shell.wl_storage)? + .is_empty(&shell.state)? ); assert_eq!( - get_num_consensus_validators(&shell.wl_storage, Epoch::default()) + get_num_consensus_validators(&shell.state, Epoch::default()) .unwrap(), 7_u64 ); for epoch in Epoch::default().next().iter_range(params.pipeline_len) { assert_eq!( validator_state_handle(&val1.address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap(), Some(ValidatorState::Jailed) ); assert_eq!( validator_state_handle(&val2.address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap(), Some(ValidatorState::Jailed) ); assert!( enqueued_slashes_handle() .at(&epoch) - .is_empty(&shell.wl_storage)? + .is_empty(&shell.state)? ); assert_eq!( - get_num_consensus_validators(&shell.wl_storage, epoch).unwrap(), + get_num_consensus_validators(&shell.state, epoch).unwrap(), 5_u64 ); } assert!( !enqueued_slashes_handle() .at(&processing_epoch) - .is_empty(&shell.wl_storage)? + .is_empty(&shell.state)? 
); // Advance to the processing epoch loop { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); next_block_for_inflation( &mut shell, @@ -3650,34 +3468,34 @@ mod test_finalize_block { ); // println!( // "Block {} epoch {}", - // shell.wl_storage.storage.block.height, - // shell.wl_storage.storage.block.epoch + // shell.state.in_mem().block.height, + // shell.state.in_mem().block.epoch // ); - if shell.wl_storage.storage.block.epoch == processing_epoch { + if shell.state.in_mem().block.epoch == processing_epoch { // println!("Reached processing epoch"); break; } else { assert!( enqueued_slashes_handle() - .at(&shell.wl_storage.storage.block.epoch) - .is_empty(&shell.wl_storage)? + .at(&shell.state.in_mem().block.epoch) + .is_empty(&shell.state)? ); let stake1 = read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, - shell.wl_storage.storage.block.epoch, + shell.state.in_mem().block.epoch, )?; let stake2 = read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val2.address, - shell.wl_storage.storage.block.epoch, + shell.state.in_mem().block.epoch, )?; let total_stake = read_total_stake( - &shell.wl_storage, + &shell.state, ¶ms, - shell.wl_storage.storage.block.epoch, + shell.state.in_mem().block.epoch, )?; assert_eq!(stake1, initial_stake); assert_eq!(stake2, initial_stake); @@ -3685,35 +3503,33 @@ mod test_finalize_block { } } - let num_slashes = namada::state::iter_prefix_bytes( - &shell.wl_storage, - &slashes_prefix(), - )? - .filter(|kv_res| { - let (k, _v) = kv_res.as_ref().unwrap(); - is_validator_slashes_key(k).is_some() - }) - .count(); + let num_slashes = + namada::state::iter_prefix_bytes(&shell.state, &slashes_prefix())? 
+ .filter(|kv_res| { + let (k, _v) = kv_res.as_ref().unwrap(); + is_validator_slashes_key(k).is_some() + }) + .count(); assert_eq!(num_slashes, 2); assert_eq!( validator_slashes_handle(&val1.address) - .len(&shell.wl_storage) + .len(&shell.state) .unwrap(), 1_u64 ); assert_eq!( validator_slashes_handle(&val2.address) - .len(&shell.wl_storage) + .len(&shell.state) .unwrap(), 1_u64 ); let slash1 = validator_slashes_handle(&val1.address) - .get(&shell.wl_storage, 0)? + .get(&shell.state, 0)? .unwrap(); let slash2 = validator_slashes_handle(&val2.address) - .get(&shell.wl_storage, 0)? + .get(&shell.state, 0)? .unwrap(); assert_eq!(slash1.r#type, SlashType::DuplicateVote); @@ -3732,47 +3548,47 @@ mod test_finalize_block { // Check that there are still 5 consensus validators and the 2 // misbehaving ones are still jailed for epoch in shell - .wl_storage - .storage + .state + .in_mem() .block .epoch .iter_range(params.pipeline_len + 1) { assert_eq!( validator_state_handle(&val1.address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap(), Some(ValidatorState::Jailed) ); assert_eq!( validator_state_handle(&val2.address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap(), Some(ValidatorState::Jailed) ); assert_eq!( - get_num_consensus_validators(&shell.wl_storage, epoch).unwrap(), + get_num_consensus_validators(&shell.state, epoch).unwrap(), 5_u64 ); } // Check that the deltas at the pipeline epoch are slashed let pipeline_epoch = - shell.wl_storage.storage.block.epoch + params.pipeline_len; + shell.state.in_mem().block.epoch + params.pipeline_len; let stake1 = read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, pipeline_epoch, )?; let stake2 = read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val2.address, pipeline_epoch, )?; let total_stake = - read_total_stake(&shell.wl_storage, ¶ms, pipeline_epoch)?; + read_total_stake(&shell.state, ¶ms, pipeline_epoch)?; let 
expected_slashed = initial_stake.mul_ceil(cubic_rate); @@ -3796,51 +3612,50 @@ mod test_finalize_block { assert_eq!(total_stake, total_initial_stake - 2u64 * expected_slashed); // Unjail one of the validators - let current_epoch = shell.wl_storage.storage.block.epoch; - unjail_validator(&mut shell.wl_storage, &val1.address, current_epoch)?; + let current_epoch = shell.state.in_mem().block.epoch; + unjail_validator(&mut shell.state, &val1.address, current_epoch)?; let pipeline_epoch = current_epoch + params.pipeline_len; // Check that the state is the same until the pipeline epoch, at which // point one validator is unjailed for epoch in shell - .wl_storage - .storage + .state + .in_mem() .block .epoch .iter_range(params.pipeline_len) { assert_eq!( validator_state_handle(&val1.address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap(), Some(ValidatorState::Jailed) ); assert_eq!( validator_state_handle(&val2.address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap(), Some(ValidatorState::Jailed) ); assert_eq!( - get_num_consensus_validators(&shell.wl_storage, epoch).unwrap(), + get_num_consensus_validators(&shell.state, epoch).unwrap(), 5_u64 ); } assert_eq!( validator_state_handle(&val1.address) - .get(&shell.wl_storage, pipeline_epoch, ¶ms) + .get(&shell.state, pipeline_epoch, ¶ms) .unwrap(), Some(ValidatorState::Consensus) ); assert_eq!( validator_state_handle(&val2.address) - .get(&shell.wl_storage, pipeline_epoch, ¶ms) + .get(&shell.state, pipeline_epoch, ¶ms) .unwrap(), Some(ValidatorState::Jailed) ); assert_eq!( - get_num_consensus_validators(&shell.wl_storage, pipeline_epoch) - .unwrap(), + get_num_consensus_validators(&shell.state, pipeline_epoch).unwrap(), 6_u64 ); @@ -3878,19 +3693,19 @@ mod test_finalize_block { num_validators, ..Default::default() }); - let mut params = read_pos_params(&shell.wl_storage).unwrap(); + let mut params = read_pos_params(&shell.state).unwrap(); 
params.owned.unbonding_len = 4; params.owned.max_validator_slots = 50; - write_pos_params(&mut shell.wl_storage, ¶ms.owned)?; + write_pos_params(&mut shell.state, ¶ms.owned)?; // Slash pool balance - let nam_address = shell.wl_storage.storage.native_token.clone(); + let nam_address = shell.state.in_mem().native_token.clone(); let slash_balance_key = token::storage_key::balance_key( &nam_address, &namada_proof_of_stake::SLASH_POOL_ADDRESS, ); let slash_pool_balance_init: token::Amount = shell - .wl_storage + .state .read(&slash_balance_key) .expect("must be able to read") .unwrap_or_default(); @@ -3898,7 +3713,7 @@ mod test_finalize_block { let consensus_set: Vec = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() @@ -3907,7 +3722,7 @@ mod test_finalize_block { let val1 = consensus_set[0].clone(); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.address.clone(), Epoch::default(), @@ -3919,28 +3734,28 @@ mod test_finalize_block { // Finalize block 1 next_block_for_inflation(&mut shell, pkh1.to_vec(), vec![], None); - let votes = get_default_true_votes(&shell.wl_storage, Epoch::default()); + let votes = get_default_true_votes(&shell.state, Epoch::default()); assert!(!votes.is_empty()); // Advance to epoch 1 and // 1. Delegate 67231 NAM to validator // 2. 
Validator self-unbond 154654 NAM let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); - assert_eq!(shell.wl_storage.storage.block.epoch.0, 1_u64); + assert_eq!(shell.state.in_mem().block.epoch.0, 1_u64); // Make an account with balance and delegate some tokens let delegator = address::testing::gen_implicit_address(); let del_1_amount = token::Amount::native_whole(37_231); - let staking_token = shell.wl_storage.storage.native_token.clone(); + let staking_token = shell.state.in_mem().native_token.clone(); namada::token::credit_tokens( - &mut shell.wl_storage, + &mut shell.state, &staking_token, &delegator, token::Amount::native_whole(200_000), ) .unwrap(); namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, Some(&delegator), &val1.address, del_1_amount, @@ -3952,7 +3767,7 @@ mod test_finalize_block { // Self-unbond let self_unbond_1_amount = token::Amount::native_whole(84_654); namada_proof_of_stake::unbond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &val1.address, self_unbond_1_amount, @@ -3962,7 +3777,7 @@ mod test_finalize_block { .unwrap(); let val_stake = namada_proof_of_stake::storage::read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, current_epoch + params.pipeline_len, @@ -3970,7 +3785,7 @@ mod test_finalize_block { .unwrap(); let total_stake = namada_proof_of_stake::storage::read_total_stake( - &shell.wl_storage, + &shell.state, ¶ms, current_epoch + params.pipeline_len, ) @@ -3988,14 +3803,14 @@ mod test_finalize_block { // Advance to epoch 2 and // 1. 
Unbond 18000 NAM from delegation let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); tracing::debug!("\nUnbonding in epoch 2"); let del_unbond_1_amount = token::Amount::native_whole(18_000); namada_proof_of_stake::unbond_tokens( - &mut shell.wl_storage, + &mut shell.state, Some(&delegator), &val1.address, del_unbond_1_amount, @@ -4005,14 +3820,14 @@ mod test_finalize_block { .unwrap(); let val_stake = namada_proof_of_stake::storage::read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, current_epoch + params.pipeline_len, ) .unwrap(); let total_stake = namada_proof_of_stake::storage::read_total_stake( - &shell.wl_storage, + &shell.state, ¶ms, current_epoch + params.pipeline_len, ) @@ -4033,15 +3848,15 @@ mod test_finalize_block { // Advance to epoch 3 and // 1. Validator self-bond 9123 NAM let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); tracing::debug!("\nBonding in epoch 3"); let self_bond_1_amount = token::Amount::native_whole(9_123); namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &val1.address, self_bond_1_amount, @@ -4053,15 +3868,15 @@ mod test_finalize_block { // Advance to epoch 4 // 1. 
Validator self-unbond 15000 NAM let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); assert_eq!(current_epoch.0, 4_u64); let self_unbond_2_amount = token::Amount::native_whole(15_000); namada_proof_of_stake::unbond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &val1.address, self_unbond_2_amount, @@ -4073,8 +3888,8 @@ mod test_finalize_block { // Advance to epoch 5 and // Delegate 8144 NAM to validator let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); assert_eq!(current_epoch.0, 5_u64); @@ -4083,7 +3898,7 @@ mod test_finalize_block { // Delegate let del_2_amount = token::Amount::native_whole(8_144); namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, Some(&delegator), &val1.address, del_2_amount, @@ -4096,8 +3911,8 @@ mod test_finalize_block { // Advance to epoch 6 let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); assert_eq!(current_epoch.0, 6_u64); @@ -4106,12 +3921,8 @@ mod test_finalize_block { // NOTE: Only the type, height, and validator address fields from the // Misbehavior struct are used in Namada let misbehavior_epoch = Epoch(3_u64); - let height = shell - .wl_storage - .storage - .block - .pred_epochs - .first_block_heights[misbehavior_epoch.0 as usize]; + let height = shell.state.in_mem().block.pred_epochs.first_block_heights + [misbehavior_epoch.0 as usize]; let misbehaviors = vec![Misbehavior { kind: MisbehaviorKind::DuplicateVote, validator: Validator { @@ -4123,8 +3934,8 @@ mod 
test_finalize_block { total_voting_power: Default::default(), }]; let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); next_block_for_inflation( &mut shell, @@ -4142,7 +3953,7 @@ mod test_finalize_block { let enqueued_slash = enqueued_slashes_handle() .at(&processing_epoch) .at(&val1.address) - .front(&shell.wl_storage) + .front(&shell.state) .unwrap() .unwrap(); assert_eq!(enqueued_slash.epoch, misbehavior_epoch); @@ -4150,7 +3961,7 @@ mod test_finalize_block { assert_eq!(enqueued_slash.rate, Dec::zero()); let last_slash = namada_proof_of_stake::storage::read_validator_last_slash_epoch( - &shell.wl_storage, + &shell.state, &val1.address, ) .unwrap(); @@ -4159,7 +3970,7 @@ mod test_finalize_block { namada_proof_of_stake::storage::validator_slashes_handle( &val1.address ) - .is_empty(&shell.wl_storage) + .is_empty(&shell.state) .unwrap() ); @@ -4170,12 +3981,8 @@ mod test_finalize_block { // Discover two more misbehaviors, one committed in epoch 3, one in // epoch 4 - let height4 = shell - .wl_storage - .storage - .block - .pred_epochs - .first_block_heights[4]; + let height4 = + shell.state.in_mem().block.pred_epochs.first_block_heights[4]; let misbehaviors = vec![ Misbehavior { kind: MisbehaviorKind::DuplicateVote, @@ -4199,8 +4006,8 @@ mod test_finalize_block { }, ]; let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); next_block_for_inflation( &mut shell, @@ -4216,18 +4023,18 @@ mod test_finalize_block { .at(&processing_epoch.next()) .at(&val1.address); - assert_eq!(enqueued_slashes_8.len(&shell.wl_storage).unwrap(), 2_u64); - assert_eq!(enqueued_slashes_9.len(&shell.wl_storage).unwrap(), 1_u64); + assert_eq!(enqueued_slashes_8.len(&shell.state).unwrap(), 2_u64); + assert_eq!(enqueued_slashes_9.len(&shell.state).unwrap(), 1_u64); let last_slash = 
namada_proof_of_stake::storage::read_validator_last_slash_epoch( - &shell.wl_storage, + &shell.state, &val1.address, ) .unwrap(); assert_eq!(last_slash, Some(Epoch(4))); assert!( namada_proof_of_stake::is_validator_frozen( - &shell.wl_storage, + &shell.state, &val1.address, current_epoch, ¶ms @@ -4238,13 +4045,13 @@ mod test_finalize_block { namada_proof_of_stake::storage::validator_slashes_handle( &val1.address ) - .is_empty(&shell.wl_storage) + .is_empty(&shell.state) .unwrap() ); let pre_stake_10 = namada_proof_of_stake::storage::read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, Epoch(10), @@ -4265,26 +4072,26 @@ mod test_finalize_block { // Advance to epoch 9, where the infractions committed in epoch 3 will // be processed let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let _ = advance_epoch(&mut shell, &pkh1, &votes, None); let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); assert_eq!(current_epoch.0, 9_u64); let val_stake_3 = namada_proof_of_stake::storage::read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, Epoch(3), ) .unwrap(); let val_stake_4 = namada_proof_of_stake::storage::read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, Epoch(4), @@ -4292,13 +4099,13 @@ mod test_finalize_block { .unwrap(); let tot_stake_3 = namada_proof_of_stake::storage::read_total_stake( - &shell.wl_storage, + &shell.state, ¶ms, Epoch(3), ) .unwrap(); let tot_stake_4 = namada_proof_of_stake::storage::read_total_stake( - &shell.wl_storage, + &shell.state, ¶ms, Epoch(4), ) @@ -4311,7 +4118,7 @@ mod test_finalize_block { Dec::one(), Dec::new(9, 0).unwrap() * tot_frac * tot_frac, ); - dbg!(&cubic_rate); + dbg!(cubic_rate); let 
equal_enough = |rate1: Dec, rate2: Dec| -> bool { let tolerance = Dec::new(1, 9).unwrap(); @@ -4324,9 +4131,9 @@ mod test_finalize_block { namada_proof_of_stake::storage::validator_slashes_handle( &val1.address, ); - assert_eq!(val_slashes.len(&shell.wl_storage).unwrap(), 2u64); + assert_eq!(val_slashes.len(&shell.state).unwrap(), 2u64); let is_rate_good = val_slashes - .iter(&shell.wl_storage) + .iter(&shell.state) .unwrap() .all(|s| equal_enough(s.unwrap().rate, cubic_rate)); assert!(is_rate_good); @@ -4334,7 +4141,7 @@ mod test_finalize_block { // Check the amount of stake deducted from the futuremost epoch while // processing the slashes let post_stake_10 = read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, Epoch(10), @@ -4386,7 +4193,7 @@ mod test_finalize_block { // Check the balance of the Slash Pool // TODO: finish once implemented // let slash_pool_balance: token::Amount = shell - // .wl_storage + // .state // .read(&slash_balance_key) // .expect("must be able to read") // .unwrap_or_default(); @@ -4400,8 +4207,8 @@ mod test_finalize_block { // Advance to epoch 10, where the infraction committed in epoch 4 will // be processed let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); assert_eq!(current_epoch.0, 10_u64); @@ -4409,7 +4216,7 @@ mod test_finalize_block { // Check the balance of the Slash Pool // TODO: finish once implemented // let slash_pool_balance: token::Amount = shell - // .wl_storage + // .state // .read(&slash_balance_key) // .expect("must be able to read") // .unwrap_or_default(); @@ -4436,14 +4243,14 @@ mod test_finalize_block { // ); let val_stake = read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, current_epoch + params.pipeline_len, )?; let post_stake_10 = read_validator_stake( - &shell.wl_storage, + 
&shell.state, ¶ms, &val1.address, Epoch(10), @@ -4491,17 +4298,17 @@ mod test_finalize_block { for _ in 0..2 { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let _ = advance_epoch(&mut shell, &pkh1, &votes, None); } - let current_epoch = shell.wl_storage.storage.block.epoch; + let current_epoch = shell.state.in_mem().block.epoch; assert_eq!(current_epoch.0, 12_u64); tracing::debug!("\nCHECK BOND AND UNBOND DETAILS"); let details = namada_proof_of_stake::queries::bonds_and_unbonds( - &shell.wl_storage, + &shell.state, None, None, ) @@ -4619,7 +4426,7 @@ mod test_finalize_block { // Withdraw the delegation unbonds, which total to 18_000. This should // only be affected by the slashes in epoch 3 let del_withdraw = namada_proof_of_stake::withdraw_tokens( - &mut shell.wl_storage, + &mut shell.state, Some(&delegator), &val1.address, current_epoch, @@ -4638,7 +4445,7 @@ mod test_finalize_block { // TODO: finish once implemented // Check the balance of the Slash Pool // let slash_pool_balance: token::Amount = shell - // .wl_storage + // .state // .read(&slash_balance_key) // .expect("must be able to read") // .unwrap_or_default(); @@ -4652,7 +4459,7 @@ mod test_finalize_block { // Withdraw the self unbonds, which total 154_654 + 15_000 - 9_123. Only // the (15_000 - 9_123) tokens are slashable. 
// let self_withdraw = namada_proof_of_stake::withdraw_tokens( - // &mut shell.wl_storage, + // &mut shell.state, // None, // &val1.address, // current_epoch, @@ -4665,7 +4472,7 @@ mod test_finalize_block { // ); // Check the balance of the Slash Pool // let slash_pool_balance: token::Amount = shell - // .wl_storage + // .state // .read(&slash_balance_key) // .expect("must be able to read") // .unwrap_or_default(); @@ -4695,11 +4502,11 @@ mod test_finalize_block { num_validators, ..Default::default() }); - let params = read_pos_params(&shell.wl_storage).unwrap(); + let params = read_pos_params(&shell.state).unwrap(); let initial_consensus_set: Vec
= read_consensus_validator_set_addresses( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() @@ -4707,14 +4514,14 @@ mod test_finalize_block { .collect(); let val1 = initial_consensus_set[0].clone(); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.clone(), Epoch::default(), ); let val2 = initial_consensus_set[1].clone(); let pkh2 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val2.clone(), Epoch::default(), @@ -4722,7 +4529,7 @@ mod test_finalize_block { let validator_stake = namada_proof_of_stake::storage::read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val2, Epoch::default(), @@ -4739,8 +4546,8 @@ mod test_finalize_block { // Ensure that there is no liveness data yet since there were no votes let missed_votes = liveness_missed_votes_handle(); let sum_missed_votes = liveness_sum_missed_votes_handle(); - assert!(missed_votes.is_empty(&shell.wl_storage)?); - assert!(sum_missed_votes.is_empty(&shell.wl_storage)?); + assert!(missed_votes.is_empty(&shell.state)?); + assert!(sum_missed_votes.is_empty(&shell.state)?); let minimum_unsigned_blocks = ((Dec::one() - params.liveness_threshold) @@ -4751,8 +4558,8 @@ mod test_finalize_block { // Finalize block 2 and ensure that some data has been written let default_all_votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); next_block_for_inflation( &mut shell, @@ -4760,17 +4567,17 @@ mod test_finalize_block { default_all_votes.clone(), None, ); - assert!(missed_votes.is_empty(&shell.wl_storage)?); + assert!(missed_votes.is_empty(&shell.state)?); for val in &initial_consensus_set { - let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + let sum = sum_missed_votes.get(&shell.state, val)?; assert_eq!(sum, Some(0u64)); } // Completely unbond one of the validator to test the pruning at the // pipeline epoch - let mut current_epoch = 
shell.wl_storage.storage.block.epoch; + let mut current_epoch = shell.state.in_mem().block.epoch; namada_proof_of_stake::unbond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &val5, validator_stake, @@ -4778,16 +4585,12 @@ mod test_finalize_block { false, )?; let pipeline_vals = read_consensus_validator_set_addresses( - &shell.wl_storage, + &shell.state, current_epoch + params.pipeline_len, )?; assert_eq!(pipeline_vals.len(), initial_consensus_set.len() - 1); let val5_pipeline_state = validator_state_handle(&val5) - .get( - &shell.wl_storage, - current_epoch + params.pipeline_len, - ¶ms, - )? + .get(&shell.state, current_epoch + params.pipeline_len, ¶ms)? .unwrap(); assert_eq!(val5_pipeline_state, ValidatorState::BelowThreshold); @@ -4802,8 +4605,8 @@ mod test_finalize_block { // NOTE: assume the minimum blocks for jailing is larger than remaining // blocks to next epoch! let mut votes_no2 = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); votes_no2.retain(|vote| vote.validator.address != pkh2); @@ -4816,25 +4619,25 @@ mod test_finalize_block { votes_no2.clone(), None, ); - current_epoch = shell.wl_storage.storage.block.epoch; + current_epoch = shell.state.in_mem().block.epoch; val2_num_missed_blocks += 1; } // Checks upon the new epoch for val in &initial_consensus_set { let missed_votes = liveness_missed_votes_handle().at(val); - let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + let sum = sum_missed_votes.get(&shell.state, val)?; if val == &val2 { assert_eq!(sum, Some(val2_num_missed_blocks)); for height in first_height_without_vote ..first_height_without_vote + val2_num_missed_blocks { - assert!(missed_votes.contains(&shell.wl_storage, &height)?); + assert!(missed_votes.contains(&shell.state, &height)?); assert!(sum.unwrap() < minimum_unsigned_blocks); } } else { - assert!(missed_votes.is_empty(&shell.wl_storage)?); + 
assert!(missed_votes.is_empty(&shell.state)?); assert_eq!(sum, Some(0u64)); } } @@ -4847,30 +4650,29 @@ mod test_finalize_block { votes_no2.clone(), None, ); - if shell.wl_storage.storage.update_epoch_blocks_delay == Some(1) { + if shell.state.in_mem().update_epoch_blocks_delay == Some(1) { break; } } - assert_eq!(shell.wl_storage.storage.block.epoch, current_epoch); + assert_eq!(shell.state.in_mem().block.epoch, current_epoch); let pipeline_vals = read_consensus_validator_set_addresses( - &shell.wl_storage, + &shell.state, current_epoch + params.pipeline_len, )?; assert_eq!(pipeline_vals.len(), initial_consensus_set.len() - 1); let val2_sum_missed_votes = - liveness_sum_missed_votes_handle().get(&shell.wl_storage, &val2)?; + liveness_sum_missed_votes_handle().get(&shell.state, &val2)?; assert_eq!( val2_sum_missed_votes, Some( - shell.wl_storage.storage.block.height.0 - - first_height_without_vote + shell.state.in_mem().block.height.0 - first_height_without_vote ) ); for val in &initial_consensus_set { if val == &val2 { continue; } - let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + let sum = sum_missed_votes.get(&shell.state, val)?; assert_eq!(sum, Some(0u64)); } @@ -4882,16 +4684,16 @@ mod test_finalize_block { votes_no2.clone(), None, ); - current_epoch = shell.wl_storage.storage.block.epoch; + current_epoch = shell.state.in_mem().block.epoch; assert_eq!(current_epoch, Epoch(2)); let val2_sum_missed_votes = - liveness_sum_missed_votes_handle().get(&shell.wl_storage, &val2)?; + liveness_sum_missed_votes_handle().get(&shell.state, &val2)?; assert_eq!(val2_sum_missed_votes, Some(minimum_unsigned_blocks)); // Check the validator sets for all epochs up through the pipeline let consensus_vals = read_consensus_validator_set_addresses( - &shell.wl_storage, + &shell.state, current_epoch, )?; assert_eq!( @@ -4905,7 +4707,7 @@ mod test_finalize_block { ); for offset in 1..=params.pipeline_len { let consensus_vals = read_consensus_validator_set_addresses( - 
&shell.wl_storage, + &shell.state, current_epoch + offset, )?; assert_eq!( @@ -4913,11 +4715,11 @@ mod test_finalize_block { HashSet::from_iter([val1.clone(), val3.clone(), val4.clone()]) ); let val2_state = validator_state_handle(&val2) - .get(&shell.wl_storage, current_epoch + offset, ¶ms)? + .get(&shell.state, current_epoch + offset, ¶ms)? .unwrap(); assert_eq!(val2_state, ValidatorState::Jailed); let val5_state = validator_state_handle(&val5) - .get(&shell.wl_storage, current_epoch + offset, ¶ms)? + .get(&shell.state, current_epoch + offset, ¶ms)? .unwrap(); assert_eq!(val5_state, ValidatorState::BelowThreshold); } @@ -4926,26 +4728,26 @@ mod test_finalize_block { // there, 5 should be removed) for val in &initial_consensus_set { let missed_votes = liveness_missed_votes_handle().at(val); - let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + let sum = sum_missed_votes.get(&shell.state, val)?; if val == &val2 { assert_eq!( sum, Some( - shell.wl_storage.storage.block.height.0 + shell.state.in_mem().block.height.0 - first_height_without_vote ) ); for height in first_height_without_vote - ..shell.wl_storage.storage.block.height.0 + ..shell.state.in_mem().block.height.0 { - assert!(missed_votes.contains(&shell.wl_storage, &height)?); + assert!(missed_votes.contains(&shell.state, &height)?); } } else if val == &val5 { - assert!(missed_votes.is_empty(&shell.wl_storage)?); + assert!(missed_votes.is_empty(&shell.state)?); assert!(sum.is_none()); } else { - assert!(missed_votes.is_empty(&shell.wl_storage)?); + assert!(missed_votes.is_empty(&shell.state)?); assert_eq!(sum, Some(0u64)); } } @@ -4955,8 +4757,8 @@ mod test_finalize_block { let next_epoch = current_epoch.next(); loop { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None).0; if current_epoch == next_epoch { @@ -4967,9 +4769,9 @@ mod 
test_finalize_block { // Check that the liveness data only contains data for vals 1, 3, and 4 for val in &initial_consensus_set { let missed_votes = liveness_missed_votes_handle().at(val); - let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + let sum = sum_missed_votes.get(&shell.state, val)?; - assert!(missed_votes.is_empty(&shell.wl_storage)?); + assert!(missed_votes.is_empty(&shell.state)?); if val == &val2 || val == &val5 { assert!(sum.is_none()); } else { @@ -4979,13 +4781,13 @@ mod test_finalize_block { // Validator 2 unjail itself namada_proof_of_stake::unjail_validator( - &mut shell.wl_storage, + &mut shell.state, &val2, current_epoch, )?; let pipeline_epoch = current_epoch + params.pipeline_len; let val2_pipeline_state = validator_state_handle(&val2).get( - &shell.wl_storage, + &shell.state, pipeline_epoch, ¶ms, )?; @@ -4994,8 +4796,8 @@ mod test_finalize_block { // Advance to the pipeline epoch loop { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None).0; if current_epoch == pipeline_epoch { @@ -5003,11 +4805,11 @@ mod test_finalize_block { } } let sum_liveness = liveness_sum_missed_votes_handle(); - assert_eq!(sum_liveness.get(&shell.wl_storage, &val1)?, Some(0u64)); - assert_eq!(sum_liveness.get(&shell.wl_storage, &val2)?, None); - assert_eq!(sum_liveness.get(&shell.wl_storage, &val3)?, Some(0u64)); - assert_eq!(sum_liveness.get(&shell.wl_storage, &val4)?, Some(0u64)); - assert_eq!(sum_liveness.get(&shell.wl_storage, &val5)?, None); + assert_eq!(sum_liveness.get(&shell.state, &val1)?, Some(0u64)); + assert_eq!(sum_liveness.get(&shell.state, &val2)?, None); + assert_eq!(sum_liveness.get(&shell.state, &val3)?, Some(0u64)); + assert_eq!(sum_liveness.get(&shell.state, &val4)?, Some(0u64)); + assert_eq!(sum_liveness.get(&shell.state, &val5)?, None); Ok(()) } @@ -5044,15 +4846,15 @@ mod 
test_finalize_block { consensus_votes: &[VoteInfo], misbehaviors: Option>, ) -> (Epoch, token::Amount) { - let current_epoch = shell.wl_storage.storage.block.epoch; + let current_epoch = shell.state.in_mem().block.epoch; let staking_token = - namada_proof_of_stake::staking_token_address(&shell.wl_storage); + namada_proof_of_stake::staking_token_address(&shell.state); // NOTE: assumed that the only change in pos address balance by // advancing to the next epoch is minted inflation - no change occurs // due to slashing let pos_balance_pre = shell - .wl_storage + .state .read::(&token::storage_key::balance_key( &staking_token, &pos_address, @@ -5066,12 +4868,12 @@ mod test_finalize_block { consensus_votes.to_owned(), misbehaviors.clone(), ); - if shell.wl_storage.storage.block.epoch == current_epoch.next() { + if shell.state.in_mem().block.epoch == current_epoch.next() { break; } } let pos_balance_post = shell - .wl_storage + .state .read::(&token::storage_key::balance_key( &staking_token, &pos_address, @@ -5080,7 +4882,7 @@ mod test_finalize_block { .unwrap_or_default(); ( - shell.wl_storage.storage.block.epoch, + shell.state.in_mem().block.epoch, pos_balance_post - pos_balance_pre, ) } @@ -5092,7 +4894,7 @@ mod test_finalize_block { setup_at_height(3u64); let proposal_execution_key = get_proposal_execution_key(0); shell - .wl_storage + .state .write(&proposal_execution_key, 0u64) .expect("Test failed."); let mut tx = Tx::new(shell.chain_id.clone(), None); @@ -5101,21 +4903,22 @@ mod test_finalize_block { NonZeroU64::new_unchecked(42) }); shell - .wl_storage + .state .write(&min_confirmations_key(), new_min_confirmations) .expect("Test failed"); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let keys_changed = BTreeSet::from([min_confirmations_key()]); let verifiers = BTreeSet::default(); + let sentinel = RefCell::new(VpSentinel::default()); 
let ctx = namada::ledger::native_vp::Ctx::new( shell.mode.get_validator_address().expect("Test failed"), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + shell.state.read_only(), &tx, &TxIndex(0), - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -5128,9 +4931,9 @@ mod test_finalize_block { // we advance forward to the next epoch let mut req = FinalizeBlock::default(); - req.header.time = namada::types::time::DateTimeUtc::now(); + req.header.time = namada::core::time::DateTimeUtc::now(); let current_decision_height = shell.get_current_decision_height(); - if let Some(b) = shell.wl_storage.storage.last_block.as_mut() { + if let Some(b) = shell.state.in_mem_mut().last_block.as_mut() { b.height = current_decision_height + 11; } shell.finalize_block(req).expect("Test failed"); @@ -5138,17 +4941,17 @@ mod test_finalize_block { let consensus_set: Vec = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() .into_iter() .collect(); - let params = read_pos_params(&shell.wl_storage).unwrap(); + let params = read_pos_params(&shell.state).unwrap(); let val1 = consensus_set[0].clone(); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.address.clone(), Epoch::default(), diff --git a/crates/apps/src/lib/node/ledger/shell/governance.rs b/crates/apps/src/lib/node/ledger/shell/governance.rs index 119bc789b2..e568f5e212 100644 --- a/crates/apps/src/lib/node/ledger/shell/governance.rs +++ b/crates/apps/src/lib/node/ledger/shell/governance.rs @@ -1,5 +1,8 @@ use std::collections::HashMap; +use namada::core::encode; +use namada::core::event::EmitEvents; +use namada::core::storage::Epoch; use namada::governance::pgf::storage::keys as pgf_storage; use namada::governance::pgf::storage::steward::StewardDetail; use namada::governance::pgf::{storage as pgf, ADDRESS}; @@ -12,32 +15,43 @@ use namada::governance::utils::{ VotePower, }; use 
namada::governance::{storage as gov_api, ADDRESS as gov_address}; +use namada::ibc; use namada::ledger::governance::utils::ProposalEvent; use namada::ledger::pos::BondId; -use namada::ledger::protocol; use namada::proof_of_stake::bond_amount; use namada::proof_of_stake::parameters::PosParams; use namada::proof_of_stake::storage::read_total_stake; -use namada::state::{DBIter, StorageHasher, StorageWrite, DB}; +use namada::state::StorageWrite; use namada::tx::{Code, Data}; -use namada::types::address::Address; -use namada::types::encode; -use namada::types::storage::Epoch; -use namada::{ibc, token}; use namada_sdk::proof_of_stake::storage::read_validator_stake; use super::utils::force_read; use super::*; +pub fn finalize_block( + shell: &mut Shell, + events: &mut impl EmitEvents, + is_new_epoch: bool, +) -> Result<()> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + if is_new_epoch { + execute_governance_proposals(shell, events)?; + } + Ok(()) +} + #[derive(Default)] pub struct ProposalsResult { passed: Vec, rejected: Vec, } -pub fn execute_governance_proposals( +fn execute_governance_proposals( shell: &mut Shell, - response: &mut shim::response::FinalizeBlock, + events: &mut impl EmitEvents, ) -> Result where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, @@ -52,41 +66,37 @@ where let proposal_author_key = gov_storage::get_author_key(id); let funds: token::Amount = - force_read(&shell.wl_storage, &proposal_funds_key)?; + force_read(&shell.state, &proposal_funds_key)?; let proposal_end_epoch: Epoch = - force_read(&shell.wl_storage, &proposal_end_epoch_key)?; + force_read(&shell.state, &proposal_end_epoch_key)?; let proposal_type: ProposalType = - force_read(&shell.wl_storage, &proposal_type_key)?; + force_read(&shell.state, &proposal_type_key)?; let proposal_author: Address = - force_read(&shell.wl_storage, &proposal_author_key)?; + force_read(&shell.state, &proposal_author_key)?; - let is_steward = 
pgf::is_steward(&shell.wl_storage, &proposal_author)?; + let is_steward = pgf::is_steward(&shell.state, &proposal_author)?; - let params = read_pos_params(&shell.wl_storage)?; + let params = read_pos_params(&shell.state)?; let total_voting_power = - read_total_stake(&shell.wl_storage, ¶ms, proposal_end_epoch)?; + read_total_stake(&shell.state, ¶ms, proposal_end_epoch)?; let tally_type = TallyType::from(proposal_type.clone(), is_steward); let votes = compute_proposal_votes( - &shell.wl_storage, + &shell.state, ¶ms, id, proposal_end_epoch, )?; let proposal_result = compute_proposal_result(votes, total_voting_power, tally_type); - gov_api::write_proposal_result( - &mut shell.wl_storage, - id, - proposal_result, - )?; + gov_api::write_proposal_result(&mut shell.state, id, proposal_result)?; let transfer_address = match proposal_result.result { TallyResult::Passed => { let proposal_event = match proposal_type { ProposalType::Default(_) => { let proposal_code = - gov_api::get_proposal_code(&shell.wl_storage, id)?; + gov_api::get_proposal_code(&shell.state, id)?; let result = execute_default_proposal( shell, id, @@ -113,7 +123,7 @@ where } ProposalType::PGFSteward(stewards) => { let result = execute_pgf_steward_proposal( - &mut shell.wl_storage, + &mut shell.state, stewards, )?; tracing::info!( @@ -126,10 +136,9 @@ where .into() } ProposalType::PGFPayment(payments) => { - let native_token = - &shell.wl_storage.get_native_token()?; + let native_token = &shell.state.get_native_token()?; let result = execute_pgf_funding_proposal( - &mut shell.wl_storage, + &mut shell.state, native_token, payments, id, @@ -141,33 +150,31 @@ where ); for ibc_event in - shell.wl_storage.write_log_mut().take_ibc_events() + shell.state.write_log_mut().take_ibc_events() { let mut event = Event::from(ibc_event.clone()); // Add the height for IBC event query - let height = shell - .wl_storage - .storage - .get_last_block_height() - + 1; + let height = + shell.state.in_mem().get_last_block_height() + 
+ 1; event["height"] = height.to_string(); - response.events.push(event); + events.emit(event); } ProposalEvent::pgf_payments_proposal_event(id, result) .into() } }; - response.events.push(proposal_event); + events.emit(proposal_event); proposals_result.passed.push(id); - gov_api::get_proposal_author(&shell.wl_storage, id)? + gov_api::get_proposal_author(&shell.state, id)? } TallyResult::Rejected => { if let ProposalType::PGFPayment(_) = proposal_type { if proposal_result.two_thirds_nay_over_two_thirds_total() { pgf::remove_steward( - &mut shell.wl_storage, + &mut shell.state, &proposal_author, )?; @@ -183,7 +190,7 @@ where } let proposal_event = ProposalEvent::rejected_proposal_event(id).into(); - response.events.push(proposal_event); + events.emit(proposal_event); proposals_result.rejected.push(id); tracing::info!( @@ -195,10 +202,10 @@ where } }; - let native_token = shell.wl_storage.get_native_token()?; + let native_token = shell.state.get_native_token()?; if let Some(address) = transfer_address { token::transfer( - &mut shell.wl_storage, + &mut shell.state, &native_token, &gov_address, &address, @@ -206,7 +213,7 @@ where )?; } else { token::burn_tokens( - &mut shell.wl_storage, + &mut shell.state, &native_token, &gov_address, funds, @@ -290,7 +297,7 @@ where { if let Some(code) = proposal_code { let pending_execution_key = gov_storage::get_proposal_execution_key(id); - shell.wl_storage.write(&pending_execution_key, ())?; + shell.state.write(&pending_execution_key, ())?; let mut tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); tx.header.chain_id = shell.chain_id.clone(); @@ -303,28 +310,27 @@ where * based on the code size. We dont * need it here. 
*/ TxIndex::default(), - &mut TxGasMeter::new_from_sub_limit(u64::MAX.into()), /* No gas limit for governance proposal */ - &mut shell.wl_storage, + &RefCell::new(TxGasMeter::new_from_sub_limit(u64::MAX.into())), /* No gas limit for governance proposal */ + &mut shell.state, &mut shell.vp_wasm_cache, &mut shell.tx_wasm_cache, None, ); shell - .wl_storage - .storage + .state .delete(&pending_execution_key) .expect("Should be able to delete the storage."); match tx_result { Ok(tx_result) => { if tx_result.is_accepted() { - shell.wl_storage.commit_tx(); + shell.state.commit_tx(); Ok(true) } else { Ok(false) } } Err(_) => { - shell.wl_storage.drop_tx(); + shell.state.drop_tx(); Ok(false) } } @@ -363,7 +369,7 @@ where } fn execute_pgf_funding_proposal( - storage: &mut WlStorage, + state: &mut WlState, token: &Address, fundings: BTreeSet, proposal_id: u64, @@ -377,7 +383,7 @@ where PGFAction::Continuous(action) => match action { AddRemove::Add(target) => { pgf_storage::fundings_handle().insert( - storage, + state, target.target().clone(), StoragePgfFunding::new(target.clone(), proposal_id), )?; @@ -391,7 +397,7 @@ where } AddRemove::Remove(target) => { pgf_storage::fundings_handle() - .remove(storage, &target.target())?; + .remove(state, &target.target())?; tracing::info!( "Removed ContinousPgf from proposal id {}: set {} to \ {}.", @@ -404,14 +410,14 @@ where PGFAction::Retro(target) => { let result = match &target { PGFTarget::Internal(target) => token::transfer( - storage, + state, token, &ADDRESS, &target.target, target.amount, ), PGFTarget::Ibc(target) => { - ibc::transfer_over_ibc(storage, token, &ADDRESS, target) + ibc::transfer_over_ibc(state, token, &ADDRESS, target) } }; match result { diff --git a/crates/apps/src/lib/node/ledger/shell/init_chain.rs b/crates/apps/src/lib/node/ledger/shell/init_chain.rs index bfe6922ed8..6c6db98860 100644 --- a/crates/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/crates/apps/src/lib/node/ledger/shell/init_chain.rs @@ -6,14 
+6,13 @@ use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; use masp_proofs::bls12_381; use namada::account::protocol_pk_key; +use namada::core::hash::Hash as CodeHash; +use namada::core::time::{TimeZone, Utc}; use namada::ledger::parameters::Parameters; use namada::ledger::{ibc, pos}; use namada::proof_of_stake::BecomeValidator; -use namada::state::{DBIter, StorageHasher, StorageWrite, DB}; +use namada::state::StorageWrite; use namada::token::{credit_tokens, write_denom}; -use namada::types::address::Address; -use namada::types::hash::Hash as CodeHash; -use namada::types::time::{DateTimeUtc, TimeZone, Utc}; use namada::vm::validate_untrusted_wasm; use namada_sdk::eth_bridge::EthBridgeStatus; use namada_sdk::proof_of_stake::PosParams; @@ -27,7 +26,6 @@ use crate::config::genesis::templates::{TokenBalances, TokenConfig}; use crate::config::genesis::transactions::{ BondTx, EstablishedAccountTx, Signed as SignedTx, ValidatorAccountTx, }; -use crate::facade::tendermint::v0_37::abci::{request, response}; use crate::facade::tendermint_proto::google::protobuf; use crate::wasm_loader; @@ -89,7 +87,7 @@ where #[cfg(any(test, feature = "testing"))] _num_validators: u64, ) -> Result { let mut response = response::InitChain::default(); - let chain_id = self.wl_storage.storage.chain_id.as_str(); + let chain_id = self.state.in_mem().chain_id.as_str(); if chain_id != init.chain_id.as_str() { return Err(Error::ChainId(format!( "Current chain ID: {}, Tendermint chain ID: {}", @@ -123,7 +121,7 @@ where { // update the native token from the genesis file let native_token = genesis.get_native_token().clone(); - self.wl_storage.storage.native_token = native_token; + self.state.in_mem_mut().native_token = native_token; } let mut validation = InitChainValidation::new(self, false); validation.run( @@ -141,22 +139,20 @@ where let anchor = empty_commitment_tree.root(); let note_commitment_tree_key = token::storage_key::masp_commitment_tree_key(); - 
self.wl_storage + self.state .write(¬e_commitment_tree_key, empty_commitment_tree) .unwrap(); let commitment_tree_anchor_key = token::storage_key::masp_commitment_anchor_key(anchor); - self.wl_storage - .write(&commitment_tree_anchor_key, ()) - .unwrap(); + self.state.write(&commitment_tree_anchor_key, ()).unwrap(); // Init masp convert anchor let convert_anchor_key = token::storage_key::masp_convert_anchor_key(); - self.wl_storage.write( + self.state.write( &convert_anchor_key, - namada::types::hash::Hash( + namada::core::hash::Hash( bls12_381::Scalar::from( - self.wl_storage.storage.conversion_state.tree.root(), + self.state.in_mem().conversion_state.tree.root(), ) .to_bytes(), ), @@ -189,10 +185,7 @@ where #[cfg(any(test, feature = "testing"))] _num_validators: u64, ) -> ControlFlow<()> { let ts: protobuf::Timestamp = init.time.into(); - let initial_height = init - .initial_height - .try_into() - .expect("Unexpected block height"); + let initial_height = init.initial_height.into(); // TODO hacky conversion, depends on https://github.com/informalsystems/tendermint-rs/issues/870 let genesis_time: DateTimeUtc = (Utc .timestamp_opt(ts.seconds, ts.nanos as u32)) @@ -203,19 +196,19 @@ where // Initialize protocol parameters let parameters = genesis.get_chain_parameters(&self.wasm_dir); self.store_wasms(¶meters)?; - parameters::init_storage(¶meters, &mut self.wl_storage).unwrap(); + parameters::init_storage(¶meters, &mut self.state).unwrap(); // Initialize governance parameters let gov_params = genesis.get_gov_params(); - gov_params.init_storage(&mut self.wl_storage).unwrap(); + gov_params.init_storage(&mut self.state).unwrap(); // configure the Ethereum bridge if the configuration is set. 
if let Some(config) = genesis.get_eth_bridge_params() { tracing::debug!("Initializing Ethereum bridge storage."); - config.init_storage(&mut self.wl_storage); + config.init_storage(&mut self.state); self.update_eth_oracle(&Default::default()); } else { - self.wl_storage + self.state .write( &namada::eth_bridge::storage::active_key(), EthBridgeStatus::Disabled, @@ -224,16 +217,16 @@ where } // Depends on parameters being initialized - self.wl_storage - .storage + self.state + .in_mem_mut() .init_genesis_epoch(initial_height, genesis_time, ¶meters) .expect("Initializing genesis epoch must not fail"); // PoS system depends on epoch being initialized let pos_params = genesis.get_pos_params(); - let (current_epoch, _gas) = self.wl_storage.storage.get_current_epoch(); + let (current_epoch, _gas) = self.state.in_mem().get_current_epoch(); pos::namada_proof_of_stake::init_genesis( - &mut self.wl_storage, + &mut self.state, &pos_params, current_epoch, ) @@ -242,7 +235,7 @@ where // PGF parameters let pgf_params = genesis.get_pgf_params(); pgf_params - .init_storage(&mut self.wl_storage) + .init_storage(&mut self.state) .expect("Should be able to initialized PGF at genesis"); // Loaded VP code cache to avoid loading the same files multiple times @@ -259,19 +252,19 @@ where self.apply_genesis_txs_bonds(&genesis); pos::namada_proof_of_stake::compute_and_store_total_consensus_stake( - &mut self.wl_storage, + &mut self.state, current_epoch, ) .expect("Could not compute total consensus stake at genesis"); // This has to be done after `apply_genesis_txs_validator_account` pos::namada_proof_of_stake::copy_genesis_validator_sets( - &mut self.wl_storage, + &mut self.state, &pos_params, current_epoch, ) .expect("Must be able to copy PoS genesis validator sets"); - ibc::init_genesis_storage(&mut self.wl_storage); + ibc::init_genesis_storage(&mut self.state); ControlFlow::Continue(()) } @@ -283,8 +276,8 @@ where vp_cache: &mut HashMap>, ) -> ControlFlow<(), Vec> { use 
std::collections::hash_map::Entry; - let Some(vp_filename) = - self.validate( + let Some(vp_filename) = self + .validate( genesis .vps .wasm @@ -292,8 +285,10 @@ where .map(|conf| conf.filename.clone()) .ok_or_else(|| { Panic::MissingVpWasmConfig(name.to_string()) - })) - .or_placeholder(None)? else { + }), + ) + .or_placeholder(None)? + else { return self.proceed_with(vec![]); }; let code = match vp_cache.entry(vp_filename.clone()) { @@ -323,10 +318,13 @@ where } = params; let mut is_implicit_vp_stored = false; - let Some(checksums) = self.validate( - wasm_loader::Checksums::read_checksums(&self.wasm_dir) - .map_err(|_| Panic::ChecksumsFile) - ).or_placeholder(None)? else { + let Some(checksums) = self + .validate( + wasm_loader::Checksums::read_checksums(&self.wasm_dir) + .map_err(|_| Panic::ChecksumsFile), + ) + .or_placeholder(None)? + else { return self.proceed_with(()); }; @@ -393,15 +391,13 @@ where let hash_key = Key::wasm_hash(name); let code_name_key = Key::wasm_code_name(name.to_owned()); - self.wl_storage.write_bytes(&code_key, code).unwrap(); - self.wl_storage.write(&code_len_key, code_len).unwrap(); - self.wl_storage.write_bytes(&hash_key, code_hash).unwrap(); + self.state.write_bytes(&code_key, code).unwrap(); + self.state.write(&code_len_key, code_len).unwrap(); + self.state.write_bytes(&hash_key, code_hash).unwrap(); if &Some(code_hash) == implicit_vp_code_hash { is_implicit_vp_stored = true; } - self.wl_storage - .write_bytes(&code_name_key, code_hash) - .unwrap(); + self.state.write_bytes(&code_name_key, code_hash).unwrap(); } else { tracing::warn!("The wasm {name} isn't allowed."); self.warn(Warning::DisallowedWasm(name.to_string())); @@ -431,10 +427,10 @@ where config: TokenConfig { denom, masp_params }, } = token; // associate a token with its denomination. 
- write_denom(&mut self.wl_storage, address, *denom).unwrap(); + write_denom(&mut self.state, address, *denom).unwrap(); namada::token::write_params( masp_params, - &mut self.wl_storage, + &mut self.state, address, denom, ) @@ -443,8 +439,8 @@ where // add token addresses to the masp reward conversions lookup // table. let alias = alias.to_string(); - self.wl_storage - .storage + self.state + .in_mem_mut() .conversion_state .tokens .insert(alias, address.clone()); @@ -460,22 +456,27 @@ where for (token_alias, TokenBalances(balances)) in &genesis.balances.token { tracing::debug!("Initializing token balances {token_alias}"); - let Some(token_address) = self.validate(genesis - .tokens - .token - .get(token_alias) - .ok_or_else(|| Panic::MissingTokenConfig(token_alias.to_string())) - .map(|conf| &conf.address) - ) - .or_placeholder(None)? else { - continue + let Some(token_address) = self + .validate( + genesis + .tokens + .token + .get(token_alias) + .ok_or_else(|| { + Panic::MissingTokenConfig(token_alias.to_string()) + }) + .map(|conf| &conf.address), + ) + .or_placeholder(None)? 
+ else { + continue; }; let mut total_token_balance = token::Amount::zero(); for (owner, balance) in balances { if let genesis::GenesisAddress::PublicKey(pk) = owner { namada::account::init_account_storage( - &mut self.wl_storage, + &mut self.state, &owner.address(), std::slice::from_ref(&pk.raw), 1, @@ -489,7 +490,7 @@ where owner, ); credit_tokens( - &mut self.wl_storage, + &mut self.state, token_address, &owner.address(), balance.amount(), @@ -498,7 +499,7 @@ where total_token_balance += balance.amount(); } // Write the total amount of tokens for the ratio - self.wl_storage + self.state .write( &token::storage_key::minted_balance_key(token_address), total_token_balance, @@ -530,14 +531,14 @@ where ); let vp_code = self.lookup_vp("vp_user", genesis, vp_cache)?; let code_hash = CodeHash::sha256(&vp_code); - self.wl_storage + self.state .write_bytes(&Key::validity_predicate(address), code_hash) .unwrap(); let public_keys: Vec<_> = public_keys.iter().map(|pk| pk.raw.clone()).collect(); namada::account::init_account_storage( - &mut self.wl_storage, + &mut self.state, address, &public_keys, *threshold, @@ -554,7 +555,7 @@ where genesis: &genesis::chain::Finalized, vp_cache: &mut HashMap>, params: &PosParams, - current_epoch: namada::types::storage::Epoch, + current_epoch: namada::core::storage::Epoch, ) -> ControlFlow<()> { if let Some(txs) = genesis.transactions.validator_account.as_ref() { for FinalizedValidatorAccountTx { @@ -586,18 +587,18 @@ where let vp_code = self.lookup_vp("vp_user", genesis, vp_cache)?; let code_hash = CodeHash::sha256(&vp_code); - self.wl_storage + self.state .write_bytes(&Key::validity_predicate(address), code_hash) .expect("Unable to write user VP"); - self.wl_storage + self.state .write(&protocol_pk_key(address), &protocol_key.pk.raw) .expect("Unable to set genesis user protocol public key"); // TODO: replace pos::init_genesis validators arg with // init_genesis_validator from here if let Err(err) = 
pos::namada_proof_of_stake::become_validator( - &mut self.wl_storage, + &mut self.state, BecomeValidator { params, address, @@ -629,7 +630,7 @@ where /// Apply genesis txs to transfer tokens fn apply_genesis_txs_bonds(&mut self, genesis: &genesis::chain::Finalized) { - let (current_epoch, _gas) = self.wl_storage.storage.get_current_epoch(); + let (current_epoch, _gas) = self.state.in_mem().get_current_epoch(); if let Some(txs) = &genesis.transactions.bond { for BondTx { source, @@ -645,7 +646,7 @@ where ); if let Err(err) = pos::namada_proof_of_stake::bond_tokens( - &mut self.wl_storage, + &mut self.state, Some(&source.address()), validator, amount.amount(), @@ -938,13 +939,12 @@ mod test { use std::collections::BTreeMap; use std::str::FromStr; - use namada::state::DBIter; - use namada::types::string_encoding::StringEncoded; + use namada::core::string_encoding::StringEncoded; use namada_sdk::wallet::alias::Alias; use super::*; use crate::config::genesis::{transactions, GenesisAddress}; - use crate::node::ledger::shell::test_utils::{self, TestShell}; + use crate::node::ledger::shell::test_utils::TestShell; use crate::wallet::defaults; /// Test that the init-chain handler never commits changes directly to the @@ -956,9 +956,8 @@ mod test { // Collect all storage key-vals into a sorted map let store_block_state = |shell: &TestShell| -> BTreeMap<_, _> { shell - .wl_storage - .storage - .db + .state + .db() .iter_prefix(None) .map(|(key, val, _gas)| (key, val)) .collect() @@ -997,10 +996,9 @@ mod test { *vp_cache.get("vp_user.wasm").expect("Test failed"), Vec::::new() ); - let [Panic::ReadingWasm(_, _)]: [Panic; 1] = initializer.panics - .clone() - .try_into() - .expect("Test failed") else { + let [Panic::ReadingWasm(_, _)]: [Panic; 1] = + initializer.panics.clone().try_into().expect("Test failed") + else { panic!("Test failed") }; @@ -1008,10 +1006,9 @@ mod test { genesis.vps.wasm.remove("vp_user").expect("Test failed"); let code = initializer.lookup_vp("vp_user", 
&genesis, &mut vp_cache); assert_eq!(code, ControlFlow::Continue(vec![])); - let [Panic::MissingVpWasmConfig(_)]: [Panic; 1] = initializer.panics - .clone() - .try_into() - .expect("Test failed") else { + let [Panic::MissingVpWasmConfig(_)]: [Panic; 1] = + initializer.panics.clone().try_into().expect("Test failed") + else { panic!("Test failed") }; } @@ -1052,10 +1049,9 @@ mod test { .store_wasms(&genesis.get_chain_parameters(test_dir.path())); assert_eq!(res, ControlFlow::Continue(())); let errors = initializer.errors.iter().collect::>(); - let [ - Error::ReadingWasm(_), - Error::LoadingWasm(_), - ]: [&Error; 2] = errors.try_into().expect("Test failed") else { + let [Error::ReadingWasm(_), Error::LoadingWasm(_)]: [&Error; 2] = + errors.try_into().expect("Test failed") + else { panic!("Test failed"); }; let expected_panics = vec![ @@ -1082,7 +1078,8 @@ mod test { Error::ReadingWasm(_), Error::LoadingWasm(_), Error::LoadingWasm(_), - ]: [&Error; 3] = errors.try_into().expect("Test failed") else { + ]: [&Error; 3] = errors.try_into().expect("Test failed") + else { panic!("Test failed"); }; let expected_panics = vec![Panic::MissingImplicitVP("None".into())]; @@ -1106,10 +1103,9 @@ mod test { .expect("Test failed"); let res = initializer.init_token_balances(&genesis); assert_eq!(res, ControlFlow::Continue(())); - let [Panic::MissingTokenConfig(_)]: [Panic; 1] = initializer.panics - .clone() - .try_into() - .expect("Test failed") else { + let [Panic::MissingTokenConfig(_)]: [Panic; 1] = + initializer.panics.clone().try_into().expect("Test failed") + else { panic!("Test failed") }; } @@ -1135,15 +1131,13 @@ mod test { }; // Initialize governance parameters let gov_params = genesis.get_gov_params(); - gov_params - .init_storage(&mut initializer.wl_storage) - .unwrap(); + gov_params.init_storage(&mut initializer.state).unwrap(); // PoS system depends on epoch being initialized let pos_params = genesis.get_pos_params(); let (current_epoch, _gas) = - 
initializer.wl_storage.storage.get_current_epoch(); + initializer.state.in_mem().get_current_epoch(); pos::namada_proof_of_stake::init_genesis( - &mut initializer.wl_storage, + &mut initializer.state, &pos_params, current_epoch, ) diff --git a/crates/apps/src/lib/node/ledger/shell/mod.rs b/crates/apps/src/lib/node/ledger/shell/mod.rs index d922969e9f..e144794e2c 100644 --- a/crates/apps/src/lib/node/ledger/shell/mod.rs +++ b/crates/apps/src/lib/node/ledger/shell/mod.rs @@ -10,8 +10,10 @@ mod finalize_block; mod governance; mod init_chain; pub use init_chain::InitChainValidation; +use namada_sdk::state::StateRead; use namada_sdk::tx::data::GasLimit; pub mod prepare_proposal; +use namada::state::State; pub mod process_proposal; pub(super) mod queries; mod stats; @@ -21,9 +23,8 @@ pub mod testing; pub mod utils; mod vote_extensions; +use std::cell::RefCell; use std::collections::BTreeSet; -use std::convert::{TryFrom, TryInto}; -use std::mem; use std::path::{Path, PathBuf}; #[allow(unused_imports)] use std::rc::Rc; @@ -31,7 +32,13 @@ use std::rc::Rc; use borsh::BorshDeserialize; use borsh_ext::BorshSerializeExt; use masp_primitives::transaction::Transaction; -use namada::core::hints; +use namada::core::address::Address; +use namada::core::chain::ChainId; +use namada::core::ethereum_events::EthereumEvent; +use namada::core::key::*; +use namada::core::storage::{BlockHeight, Key, TxIndex}; +use namada::core::time::DateTimeUtc; +use namada::core::{address, hints}; use namada::ethereum_bridge::protocol::validation::bridge_pool_roots::validate_bp_roots_vext; use namada::ethereum_bridge::protocol::validation::ethereum_events::validate_eth_events_vext; use namada::ethereum_bridge::protocol::validation::validator_set_update::validate_valset_upd_vext; @@ -45,29 +52,18 @@ use namada::ledger::protocol::{ apply_wasm_tx, get_fee_unshielding_transaction, get_transfer_hash_from_storage, ShellParams, }; -use namada::ledger::{parameters, pos, protocol}; +use namada::ledger::{parameters, 
protocol}; use namada::parameters::validate_tx_bytes; -use namada::proof_of_stake::slashing::{process_slashes, slash}; use namada::proof_of_stake::storage::read_pos_params; -use namada::proof_of_stake::{self}; use namada::state::tx_queue::{ExpiredTx, TxInQueue}; -use namada::state::wl_storage::WriteLogAndStorage; -use namada::state::write_log::WriteLog; use namada::state::{ - DBIter, Sha256Hasher, State, StorageHasher, StorageRead, TempWlStorage, - WlStorage, DB, EPOCH_SWITCH_BLOCKS_DELAY, + DBIter, FullAccessState, Sha256Hasher, StorageHasher, StorageRead, + TempWlState, WlState, DB, EPOCH_SWITCH_BLOCKS_DELAY, }; use namada::token; pub use namada::tx::data::ResultCode; use namada::tx::data::{DecryptedTx, TxType, WrapperTx, WrapperTxErr}; use namada::tx::{Section, Tx}; -use namada::types::address; -use namada::types::address::Address; -use namada::types::chain::ChainId; -use namada::types::ethereum_events::EthereumEvent; -use namada::types::key::*; -use namada::types::storage::{BlockHeight, Key, TxIndex}; -use namada::types::time::DateTimeUtc; use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::{WasmCacheAccess, WasmCacheRwAccess}; use namada::vote_ext::EthereumTxData; @@ -78,7 +74,6 @@ use tokio::sync::mpsc::{Receiver, UnboundedSender}; use super::ethereum_oracle::{self as oracle, last_processed_block}; use crate::config::{self, genesis, TendermintMode, ValidatorLocalConfig}; -use crate::facade::tendermint::abci::types::{Misbehavior, MisbehaviorKind}; use crate::facade::tendermint::v0_37::abci::{request, response}; use crate::facade::tendermint::{self, validator}; use crate::facade::tendermint_proto::v0_37::crypto::public_key; @@ -336,10 +331,7 @@ where /// The id of the current chain pub chain_id: ChainId, /// The persistent storage with write log - pub wl_storage: WlStorage, - /// Byzantine validators given from ABCI++ `prepare_proposal` are stored in - /// this field. They will be slashed when we finalize the block. 
- byzantine_validators: Vec, + pub state: FullAccessState, /// Path to the base directory with DB data and configs #[allow(dead_code)] pub(crate) base_dir: PathBuf, @@ -364,7 +356,7 @@ where /// Merkle tree storage key filter. Return `false` for keys that shouldn't be /// merklized. -pub fn is_merklized_storage_key(key: &namada_sdk::types::storage::Key) -> bool { +pub fn is_merklized_storage_key(key: &namada_sdk::storage::Key) -> bool { !token::storage_key::is_masp_key(key) && !namada::ibc::storage::is_ibc_counter_key(key) } @@ -418,33 +410,35 @@ where std::fs::create_dir(&base_dir) .expect("Creating directory for Namada should not fail"); } - let native_token = if cfg!(feature = "integration") - || (!cfg!(test) && !cfg!(feature = "benches")) - { + + // For all tests except integration use hard-coded native token addr ... + #[cfg(all( + any(test, feature = "testing", feature = "benches"), + not(feature = "integration"), + ))] + let native_token = address::testing::nam(); + // ... Otherwise, look it up from the genesis file + #[cfg(not(all( + any(test, feature = "testing", feature = "benches"), + not(feature = "integration"), + )))] + let native_token = { let chain_dir = base_dir.join(chain_id.as_str()); let genesis = genesis::chain::Finalized::read_toml_files(&chain_dir) .expect("Missing genesis files"); genesis.get_native_token().clone() - } else { - address::nam() }; // load last state from storage - let mut storage = State::open( + let state = FullAccessState::open( db_path, + db_cache, chain_id.clone(), native_token, - db_cache, config.shell.storage_read_past_height_limit, is_merklized_storage_key, ); - storage - .load_last_state() - .map_err(|e| { - tracing::error!("Cannot load the last state from the DB {}", e); - }) - .expect("PersistentStorage cannot be initialized"); let vp_wasm_cache_dir = base_dir.join(chain_id.as_str()).join("vp_wasm_cache"); let tx_wasm_cache_dir = @@ -513,14 +507,9 @@ where TendermintMode::Seed => ShellMode::Seed, }; - let wl_storage 
= WlStorage { - storage, - write_log: WriteLog::default(), - }; let mut shell = Self { chain_id, - wl_storage, - byzantine_validators: vec![], + state, base_dir, wasm_dir, mode, @@ -556,7 +545,7 @@ where /// Iterate over the wrapper txs in order #[allow(dead_code)] fn iter_tx_queue(&mut self) -> impl Iterator { - self.wl_storage.storage.tx_queue.iter() + self.state.in_mem().tx_queue.iter() } /// Load the Merkle root hash and the height of the last committed block, if @@ -566,7 +555,7 @@ where last_block_height: tendermint::block::Height::from(0_u32), ..Default::default() }; - let result = self.wl_storage.storage.get_state(); + let result = self.state.in_mem().get_state(); match result { Some((root, height)) => { @@ -596,7 +585,7 @@ where where T: Clone + BorshDeserialize, { - let result = self.wl_storage.storage.read(key); + let result = self.state.db_read(key); match result { Ok((bytes, _gas)) => match bytes { @@ -612,7 +601,7 @@ where /// Read the bytes for a storage key dropping any error pub fn read_storage_key_bytes(&self, key: &Key) -> Option> { - let result = self.wl_storage.storage.read(key); + let result = self.state.db_read(key); match result { Ok((bytes, _gas)) => bytes, @@ -620,121 +609,12 @@ where } } - /// Apply PoS slashes from the evidence - fn record_slashes_from_evidence(&mut self) { - if !self.byzantine_validators.is_empty() { - let byzantine_validators = - mem::take(&mut self.byzantine_validators); - // TODO: resolve this unwrap() better - let pos_params = read_pos_params(&self.wl_storage).unwrap(); - let current_epoch = self.wl_storage.storage.block.epoch; - for evidence in byzantine_validators { - // dbg!(&evidence); - tracing::info!("Processing evidence {evidence:?}."); - let evidence_height = match u64::try_from(evidence.height) { - Ok(height) => height, - Err(err) => { - tracing::error!( - "Unexpected evidence block height {}", - err - ); - continue; - } - }; - let evidence_epoch = match self - .wl_storage - .storage - .block - .pred_epochs 
- .get_epoch(BlockHeight(evidence_height)) - { - Some(epoch) => epoch, - None => { - tracing::error!( - "Couldn't find epoch for evidence block height {}", - evidence_height - ); - continue; - } - }; - // Disregard evidences that should have already been processed - // at this time - if evidence_epoch + pos_params.slash_processing_epoch_offset() - - pos_params.cubic_slashing_window_length - <= current_epoch - { - tracing::info!( - "Skipping outdated evidence from epoch \ - {evidence_epoch}" - ); - continue; - } - let slash_type = match evidence.kind { - MisbehaviorKind::DuplicateVote => { - pos::types::SlashType::DuplicateVote - } - MisbehaviorKind::LightClientAttack => { - pos::types::SlashType::LightClientAttack - } - MisbehaviorKind::Unknown => { - tracing::error!("Unknown evidence: {:#?}", evidence); - continue; - } - }; - let validator_raw_hash = - tm_raw_hash_to_string(evidence.validator.address); - let validator = - match proof_of_stake::storage::find_validator_by_raw_hash( - &self.wl_storage, - &validator_raw_hash, - ) - .expect("Must be able to read storage") - { - Some(validator) => validator, - None => { - tracing::error!( - "Cannot find validator's address from raw \ - hash {}", - validator_raw_hash - ); - continue; - } - }; - // Check if we're gonna switch to a new epoch after a delay - let validator_set_update_epoch = - self.get_validator_set_update_epoch(current_epoch); - tracing::info!( - "Slashing {} for {} in epoch {}, block height {} (current \ - epoch = {}, validator set update epoch = \ - {validator_set_update_epoch})", - validator, - slash_type, - evidence_epoch, - evidence_height, - current_epoch - ); - if let Err(err) = slash( - &mut self.wl_storage, - &pos_params, - current_epoch, - evidence_epoch, - evidence_height, - slash_type, - &validator, - validator_set_update_epoch, - ) { - tracing::error!("Error in slashing: {}", err); - } - } - } - } - /// Get the next epoch for which we can request validator set changed pub fn 
get_validator_set_update_epoch( &self, - current_epoch: namada_sdk::types::storage::Epoch, - ) -> namada_sdk::types::storage::Epoch { - if let Some(delay) = self.wl_storage.storage.update_epoch_blocks_delay { + current_epoch: namada_sdk::storage::Epoch, + ) -> namada_sdk::storage::Epoch { + if let Some(delay) = self.state.in_mem().update_epoch_blocks_delay { if delay == EPOCH_SWITCH_BLOCKS_DELAY { // If we're about to update validator sets for the // upcoming epoch, we can still remove the validator @@ -751,31 +631,17 @@ where } } - /// Process and apply slashes that have already been recorded for the - /// current epoch - fn process_slashes(&mut self) { - let current_epoch = self.wl_storage.storage.block.epoch; - if let Err(err) = process_slashes(&mut self.wl_storage, current_epoch) { - tracing::error!( - "Error while processing slashes queued for epoch {}: {}", - current_epoch, - err - ); - panic!("Error while processing slashes"); - } - } - /// Commit a block. Persist the application state and return the Merkle root /// hash. 
pub fn commit(&mut self) -> response::Commit { self.bump_last_processed_eth_block(); - self.wl_storage + self.state .commit_block() .expect("Encountered a storage error while committing a block"); - let merkle_root = self.wl_storage.storage.merkle_root(); - let committed_height = self.wl_storage.storage.get_last_block_height(); + let merkle_root = self.state.in_mem().merkle_root(); + let committed_height = self.state.in_mem().get_last_block_height(); tracing::info!( "Committed block hash: {merkle_root}, height: {committed_height}", ); @@ -812,7 +678,7 @@ where block is {}", eth_height ); - self.wl_storage.storage.ethereum_height = Some(eth_height); + self.state.in_mem_mut().ethereum_height = Some(eth_height); } None => tracing::info!( "Ethereum oracle has not yet fully processed any Ethereum \ @@ -858,8 +724,8 @@ where fn broadcast_expired_txs(&mut self) { let eth_events = { let mut events: Vec<_> = self - .wl_storage - .storage + .state + .in_mem_mut() .expired_txs_queue .drain() .map(|expired_tx| match expired_tx { @@ -915,7 +781,7 @@ where // for the first time ever, in which case the chain hasn't been // initialized yet. 
let has_key = self - .wl_storage + .state .has_key(&namada::eth_bridge::storage::active_key()) .expect( "We should always be able to check whether a key exists \ @@ -928,33 +794,35 @@ where ); return; } - let Some(config) = EthereumOracleConfig::read(&self.wl_storage) else { - tracing::info!("Not starting oracle as the Ethereum bridge config couldn't be found in storage"); + let Some(config) = EthereumOracleConfig::read(&self.state) else { + tracing::info!( + "Not starting oracle as the Ethereum bridge config \ + couldn't be found in storage" + ); return; }; - let active = - if !self.wl_storage.ethbridge_queries().is_bridge_active() { - if !changed_keys - .contains(&namada::eth_bridge::storage::active_key()) - { - tracing::info!( - "Not starting oracle as the Ethereum bridge is \ - disabled" - ); - return; - } else { - tracing::info!( - "Disabling oracle as the bridge has been disabled" - ); - false - } + let active = if !self.state.ethbridge_queries().is_bridge_active() { + if !changed_keys + .contains(&namada::eth_bridge::storage::active_key()) + { + tracing::info!( + "Not starting oracle as the Ethereum bridge is \ + disabled" + ); + return; } else { - true - }; + tracing::info!( + "Disabling oracle as the bridge has been disabled" + ); + false + } + } else { + true + }; let start_block = self - .wl_storage - .storage + .state + .in_mem() .ethereum_height .clone() .unwrap_or(config.eth_start_height); @@ -1014,7 +882,7 @@ where // // NB: always keep this as the first tx check, // as it is a pretty cheap one - if !validate_tx_bytes(&self.wl_storage, tx_bytes.len()) + if !validate_tx_bytes(&self.state, tx_bytes.len()) .expect("Failed to get max tx bytes param from storage") { response.code = ResultCode::TooLarge.into(); @@ -1046,8 +914,7 @@ where // Tx expiration if let Some(exp) = tx.header.expiration { let last_block_timestamp = self - .wl_storage - .storage + .state .get_last_block_timestamp() .expect("Failed to retrieve last block timestamp"); @@ -1098,9 +965,9 
@@ where ethereum_tx_data_variants::EthEventsVext::try_from(&tx), ); if let Err(err) = validate_eth_events_vext( - &self.wl_storage, + &self.state, &ext.0, - self.wl_storage.storage.get_last_block_height(), + self.state.in_mem().get_last_block_height(), ) { response.code = ResultCode::InvalidVoteExtension.into(); response.log = format!( @@ -1120,9 +987,9 @@ where ), ); if let Err(err) = validate_bp_roots_vext( - &self.wl_storage, + &self.state, &ext.0, - self.wl_storage.storage.get_last_block_height(), + self.state.in_mem().get_last_block_height(), ) { response.code = ResultCode::InvalidVoteExtension.into(); response.log = format!( @@ -1142,7 +1009,7 @@ where ), ); if let Err(err) = validate_valset_upd_vext( - &self.wl_storage, + &self.state, &ext, // n.b. only accept validator set updates // issued at the last committed epoch @@ -1152,7 +1019,7 @@ where // committed to storage, so `last_epoch` // reflects the current value of the // epoch. - self.wl_storage.storage.last_epoch, + self.state.in_mem().last_epoch, ) { response.code = ResultCode::InvalidVoteExtension.into(); response.log = format!( @@ -1187,8 +1054,7 @@ where // Max block gas let block_gas_limit: Gas = Gas::from_whole_units( - namada::parameters::get_max_block_gas(&self.wl_storage) - .unwrap(), + namada::parameters::get_max_block_gas(&self.state).unwrap(), ); if gas_meter.tx_gas_limit > block_gas_limit { response.code = ResultCode::AllocationError.into(); @@ -1201,8 +1067,7 @@ where // Replay protection check let inner_tx_hash = tx.raw_header_hash(); if self - .wl_storage - .storage + .state .has_replay_protection_entry(&tx.raw_header_hash()) .expect("Error while checking inner tx hash key in storage") { @@ -1218,14 +1083,9 @@ where let tx = Tx::try_from(tx_bytes) .expect("Deserialization shouldn't fail"); let wrapper_hash = &tx.header_hash(); - if self - .wl_storage - .storage - .has_replay_protection_entry(wrapper_hash) - .expect( - "Error while checking wrapper tx hash key in storage", - ) - { + if 
self.state.has_replay_protection_entry(wrapper_hash).expect( + "Error while checking wrapper tx hash key in storage", + ) { response.code = ResultCode::ReplayTx.into(); response.log = format!( "{INVALID_MSG}: Wrapper transaction hash {} already \ @@ -1239,7 +1099,7 @@ where if let Err(e) = mempool_fee_check( &wrapper, get_fee_unshielding_transaction(&tx, &wrapper), - &mut TempWlStorage::new(&self.wl_storage.storage), + &mut self.state.with_temp_write_log(), &mut self.vp_wasm_cache.clone(), &mut self.tx_wasm_cache.clone(), ) { @@ -1282,9 +1142,9 @@ where { use namada::ledger::pos::namada_proof_of_stake; - let (current_epoch, _gas) = self.wl_storage.storage.get_current_epoch(); + let (current_epoch, _gas) = self.state.in_mem().get_current_epoch(); let pos_params = - namada_proof_of_stake::storage::read_pos_params(&self.wl_storage) + namada_proof_of_stake::storage::read_pos_params(&self.state) .expect("Could not find the PoS parameters"); let validator_set_update_fn = if is_genesis { @@ -1294,7 +1154,7 @@ where }; validator_set_update_fn( - &self.wl_storage, + &self.state, &pos_params, current_epoch, |update| { @@ -1319,23 +1179,22 @@ where /// Retrieves the [`BlockHeight`] that is currently being decided. #[inline] pub fn get_current_decision_height(&self) -> BlockHeight { - self.wl_storage.get_current_decision_height() + self.state.get_current_decision_height() } /// Check if we are at a given [`BlockHeight`] offset, `height_offset`, /// within the current epoch. pub fn is_deciding_offset_within_epoch(&self, height_offset: u64) -> bool { - self.wl_storage - .is_deciding_offset_within_epoch(height_offset) + self.state.is_deciding_offset_within_epoch(height_offset) } } /// Checks that neither the wrapper nor the inner transaction have already -/// been applied. Requires a [`TempWlStorage`] to perform the check during +/// been applied. 
Requires a [`TempWlState`] to perform the check during /// block construction and validation pub fn replay_protection_checks( wrapper: &Tx, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, ) -> Result<()> where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, @@ -1344,7 +1203,7 @@ where let inner_tx_hash = wrapper.raw_header_hash(); // Check the inner tx hash only against the storage, skip the write // log - if temp_wl_storage + if temp_state .has_committed_replay_protection_entry(&inner_tx_hash) .expect("Error while checking inner tx hash key in storage") { @@ -1355,7 +1214,7 @@ where } let wrapper_hash = wrapper.header_hash(); - if temp_wl_storage + if temp_state .has_replay_protection_entry(&wrapper_hash) .expect("Error while checking wrapper tx hash key in storage") { @@ -1366,7 +1225,7 @@ where } // Write wrapper hash to WAL - temp_wl_storage + temp_state .write_tx_hash(wrapper_hash) .map_err(|e| Error::ReplayAttempt(e.to_string())) } @@ -1375,7 +1234,7 @@ where fn mempool_fee_check( wrapper: &WrapperTx, masp_transaction: Option, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result<()> @@ -1385,7 +1244,7 @@ where CA: 'static + WasmCacheAccess + Sync, { let minimum_gas_price = namada::ledger::parameters::read_gas_cost( - temp_wl_storage, + temp_state, &wrapper.fee.token, ) .expect("Must be able to read gas cost parameter") @@ -1398,11 +1257,11 @@ where wrapper, masp_transaction, minimum_gas_price, - temp_wl_storage, + temp_state, vp_wasm_cache, tx_wasm_cache, )?; - protocol::check_fees(temp_wl_storage, wrapper).map_err(Error::TxApply) + protocol::check_fees(temp_state, wrapper).map_err(Error::TxApply) } /// Check the validity of the fee payment, including the minimum amounts @@ -1411,7 +1270,7 @@ pub fn wrapper_fee_check( wrapper: &WrapperTx, masp_transaction: Option, minimum_gas_price: token::Amount, - temp_wl_storage: &mut TempWlStorage, + 
temp_state: &mut TempWlState, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result<()> @@ -1423,7 +1282,7 @@ where match token::denom_to_amount( wrapper.fee.amount_per_gas_unit, &wrapper.fee.token, - temp_wl_storage, + temp_state, ) { Ok(amount_per_gas_unit) if amount_per_gas_unit < minimum_gas_price => { // The fees do not match the minimum required @@ -1449,7 +1308,7 @@ where fee_unshielding_validation( wrapper, transaction, - temp_wl_storage, + temp_state, vp_wasm_cache, tx_wasm_cache, )?; @@ -1462,7 +1321,7 @@ where fn fee_unshielding_validation( wrapper: &WrapperTx, masp_transaction: Transaction, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result<()> @@ -1476,9 +1335,9 @@ where // further validation // Validate data and generate unshielding tx - let transfer_code_hash = get_transfer_hash_from_storage(temp_wl_storage); + let transfer_code_hash = get_transfer_hash_from_storage(temp_state); - let descriptions_limit = temp_wl_storage + let descriptions_limit = temp_state .read( ¶meters::storage::get_fee_unshielding_descriptions_limit_key(), ) @@ -1494,7 +1353,7 @@ where ) .map_err(|e| Error::TxApply(protocol::Error::FeeUnshieldingError(e)))?; - let fee_unshielding_gas_limit: GasLimit = temp_wl_storage + let fee_unshielding_gas_limit: GasLimit = temp_state .read(¶meters::storage::get_fee_unshielding_gas_limit_key()) .expect("Error reading from storage") .expect("Missing fee unshielding gas limit in storage"); @@ -1507,14 +1366,14 @@ where // from being passed/triggering VPs) but we cannot // commit the tx write log yet cause the tx could still // be invalid. 
- temp_wl_storage.write_log.precommit_tx(); + temp_state.write_log_mut().precommit_tx(); let result = apply_wasm_tx( unshield, &TxIndex::default(), ShellParams::new( - &mut TxGasMeter::new(fee_unshielding_gas_limit), - temp_wl_storage, + &RefCell::new(TxGasMeter::new(fee_unshielding_gas_limit)), + temp_state, vp_wasm_cache, tx_wasm_cache, ), @@ -1541,34 +1400,28 @@ where #[cfg(test)] mod test_utils { use std::ops::{Deref, DerefMut}; - use std::path::PathBuf; use data_encoding::HEXUPPER; + use namada::core::ethereum_events::Uint; + use namada::core::hash::Hash; + use namada::core::keccak::KeccakHash; + use namada::core::key::*; + use namada::core::storage::{BlockHash, Epoch, Header}; + use namada::core::time::DurationSecs; use namada::ledger::parameters::{EpochDuration, Parameters}; use namada::proof_of_stake::parameters::PosParams; use namada::proof_of_stake::storage::validator_consensus_key_handle; use namada::state::mockdb::MockDB; - use namada::state::{ - LastBlock, Sha256Hasher, StorageWrite, EPOCH_SWITCH_BLOCKS_DELAY, - }; + use namada::state::{LastBlock, StorageWrite}; use namada::tendermint::abci::types::VoteInfo; use namada::token::conversion::update_allowed_conversions; - use namada::tx::data::{Fee, TxType, WrapperTx}; + use namada::tx::data::Fee; use namada::tx::{Code, Data}; - use namada::types::address; - use namada::types::chain::ChainId; - use namada::types::ethereum_events::Uint; - use namada::types::hash::Hash; - use namada::types::keccak::KeccakHash; - use namada::types::key::*; - use namada::types::storage::{BlockHash, Epoch, Header}; - use namada::types::time::{DateTimeUtc, DurationSecs}; use tempfile::tempdir; use tokio::sync::mpsc::{Sender, UnboundedReceiver}; use super::*; use crate::config::ethereum_bridge::ledger::ORACLE_CHANNEL_BUFFER_SIZE; - use crate::facade::tendermint; use crate::facade::tendermint::abci::types::Misbehavior; use crate::facade::tendermint_proto::google::protobuf::Timestamp; use 
crate::facade::tendermint_proto::v0_37::abci::{ @@ -1658,7 +1511,7 @@ mod test_utils { /// Get the default bridge pool vext bytes to be signed. pub fn get_bp_bytes_to_sign() -> KeccakHash { - use namada::types::keccak::{Hasher, Keccak}; + use namada::core::keccak::{Hasher, Keccak}; let root = [0; 32]; let nonce = Uint::from(0).to_bytes(); @@ -1744,7 +1597,7 @@ mod test_utils { vp_wasm_compilation_cache, tx_wasm_compilation_cache, ); - shell.wl_storage.storage.block.height = height.into(); + shell.state.in_mem_mut().block.height = height.into(); (Self { shell }, receiver, eth_sender, control_receiver) } @@ -1805,7 +1658,7 @@ mod test_utils { }); let results = tx_results .into_iter() - .zip(req.txs.into_iter()) + .zip(req.txs) .map(|(res, tx_bytes)| ProcessedTx { result: res, tx: tx_bytes.into(), @@ -1852,7 +1705,7 @@ mod test_utils { /// wrapper as parameter. #[cfg(test)] pub fn enqueue_tx(&mut self, tx: Tx, inner_tx_gas: Gas) { - self.shell.wl_storage.storage.tx_queue.push(TxInQueue { + self.shell.state.in_mem_mut().tx_queue.push(TxInQueue { tx, gas: inner_tx_gas, }); @@ -1860,9 +1713,9 @@ mod test_utils { /// Start a counter for the next epoch in `num_blocks`. pub fn start_new_epoch_in(&mut self, num_blocks: u64) { - self.wl_storage.storage.next_epoch_min_start_height = - self.wl_storage.storage.get_last_block_height() + num_blocks; - self.wl_storage.storage.next_epoch_min_start_time = + self.state.in_mem_mut().next_epoch_min_start_height = + self.state.in_mem().get_last_block_height() + num_blocks; + self.state.in_mem_mut().next_epoch_min_start_time = DateTimeUtc::now(); } @@ -1880,9 +1733,9 @@ mod test_utils { self.start_new_epoch_in(1); let next_epoch_min_start_height = - self.wl_storage.storage.next_epoch_min_start_height; + self.state.in_mem().next_epoch_min_start_height; if let Some(LastBlock { height, .. 
}) = - self.wl_storage.storage.last_block.as_mut() + self.state.in_mem_mut().last_block.as_mut() { *height = next_epoch_min_start_height; } else { @@ -1893,7 +1746,7 @@ mod test_utils { for _i in 0..EPOCH_SWITCH_BLOCKS_DELAY { self.finalize_and_commit(req.clone()); } - self.wl_storage.storage.get_current_epoch().0 + self.state.in_mem().get_current_epoch().0 } } @@ -1975,7 +1828,7 @@ mod test_utils { initial_height: 1_u32.into(), }; test.init_chain(req, num_validators); - test.wl_storage.commit_block().expect("Test failed"); + test.state.commit_block().expect("Test failed"); (test, receiver, eth_sender, control_receiver) } @@ -2040,7 +1893,7 @@ mod test_utils { use namada::eth_bridge::storage::active_key; use namada::eth_bridge::storage::eth_bridge_queries::EthBridgeStatus; shell - .wl_storage + .state .write(&active_key(), EthBridgeStatus::Disabled) .expect("Test failed"); } @@ -2064,7 +1917,7 @@ mod test_utils { ); let vp_wasm_compilation_cache = 50 * 1024 * 1024; // 50 kiB let tx_wasm_compilation_cache = 50 * 1024 * 1024; // 50 kiB - let native_token = address::nam(); + let native_token = address::testing::nam(); let mut shell = Shell::::new( config::Ledger::new( base_dir.clone(), @@ -2079,8 +1932,8 @@ mod test_utils { tx_wasm_compilation_cache, ); shell - .wl_storage - .storage + .state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(1)) .expect("begin_block failed"); let keypair = gen_keypair(); @@ -2100,15 +1953,15 @@ mod test_utils { wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - shell.wl_storage.storage.tx_queue.push(TxInQueue { + shell.state.in_mem_mut().tx_queue.push(TxInQueue { tx: wrapper, gas: u64::MAX.into(), }); // Artificially increase the block height so that chain // will read the new block when restarted shell - .wl_storage - .storage + .state + .in_mem_mut() .block .pred_epochs .new_epoch(BlockHeight(1)); @@ -2133,12 +1986,12 @@ mod 
test_utils { fee_unshielding_descriptions_limit: 0, minimum_gas_price: Default::default(), }; - parameters::init_storage(¶ms, &mut shell.wl_storage) + parameters::init_storage(¶ms, &mut shell.state) .expect("Test failed"); - // make wl_storage to update conversion for a new epoch - update_allowed_conversions(&mut shell.wl_storage) + // make state to update conversion for a new epoch + update_allowed_conversions(&mut shell.state) .expect("update conversions failed"); - shell.wl_storage.commit_block().expect("commit failed"); + shell.state.commit_block().expect("commit failed"); // Drop the shell std::mem::drop(shell); @@ -2166,7 +2019,7 @@ mod test_utils { vp_wasm_compilation_cache, tx_wasm_compilation_cache, ); - assert!(!shell.wl_storage.storage.tx_queue.is_empty()); + assert!(!shell.state.in_mem().tx_queue.is_empty()); } pub(super) fn get_pkh_from_address( @@ -2195,11 +2048,7 @@ mod test_utils { ) { // Let the header time be always ahead of the next epoch min start time let header = Header { - time: shell - .wl_storage - .storage - .next_epoch_min_start_time - .next_second(), + time: shell.state.in_mem().next_epoch_min_start_time.next_second(), ..Default::default() }; let mut req = FinalizeBlock { @@ -2218,23 +2067,17 @@ mod test_utils { #[cfg(test)] mod shell_tests { - use namada::core::ledger::replay_protection; + use namada::core::storage::Epoch; + use namada::replay_protection; use namada::token::read_denom; use namada::tx::data::protocol::{ProtocolTx, ProtocolTxType}; - use namada::tx::data::{Fee, WrapperTx}; - use namada::tx::{ - Code, Data, Section, SignableEthMessage, Signature, Signed, Tx, - }; - use namada::types::ethereum_events::EthereumEvent; - use namada::types::key::RefTo; - use namada::types::storage::{BlockHeight, Epoch}; + use namada::tx::data::Fee; + use namada::tx::{Code, Data, Signature, Signed}; use namada::vote_ext::{ bridge_pool_roots, ethereum_events, ethereum_tx_data_variants, }; - use namada::{parameters, token}; use super::*; - use 
crate::node::ledger::shell::test_utils; use crate::node::ledger::shell::token::DenominatedAmount; use crate::wallet; @@ -2295,13 +2138,13 @@ mod shell_tests { transfers: vec![], }; shell - .wl_storage - .storage + .state + .in_mem_mut() .expired_txs_queue .push(ExpiredTx::EthereumEvent(ethereum_event_0.clone())); shell - .wl_storage - .storage + .state + .in_mem_mut() .expired_txs_queue .push(ExpiredTx::EthereumEvent(ethereum_event_1.clone())); @@ -2326,14 +2169,14 @@ mod shell_tests { /// not validated by `CheckTx`. #[test] fn test_outdated_nonce_mempool_validate() { - use namada::types::storage::InnerEthEventsQueue; + use namada::core::storage::InnerEthEventsQueue; const LAST_HEIGHT: BlockHeight = BlockHeight(3); let (mut shell, _recv, _, _) = test_utils::setup_at_height(LAST_HEIGHT); shell - .wl_storage - .storage + .state + .in_mem_mut() .eth_events_queue // sent transfers to namada nonce to 5 .transfers_to_namada = InnerEthEventsQueue::new_at(5.into()); @@ -2417,7 +2260,7 @@ mod shell_tests { let eth_vext = EthereumTxData::EthEventsVext( ethereum_events::Vext { validator_addr: address.clone(), - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), ethereum_events: vec![ethereum_event], } .sign(protocol_key) @@ -2431,7 +2274,7 @@ mod shell_tests { let sig = Signed::<_, SignableEthMessage>::new(hot_key, to_sign).sig; let bp_vext = EthereumTxData::BridgePoolVext( bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address, sig, } @@ -2545,7 +2388,7 @@ mod shell_tests { token::Amount::from_uint(100, 0) .expect("This can't fail"), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2584,7 +2427,7 @@ mod shell_tests { token::Amount::from_uint(100, 0) .expect("This can't fail"), ), - 
token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2656,7 +2499,7 @@ mod shell_tests { token::Amount::from_uint(100, 0) .expect("This can't fail"), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2673,12 +2516,11 @@ mod shell_tests { ))); // Write wrapper hash to storage - let mut batch = namada::state::testing::TestStorage::batch(); + let mut batch = namada::state::testing::TestState::batch(); let wrapper_hash = wrapper.header_hash(); let wrapper_hash_key = replay_protection::last_key(&wrapper_hash); shell - .wl_storage - .storage + .state .write_replay_protection_entry(&mut batch, &wrapper_hash_key) .expect("Test failed"); @@ -2715,8 +2557,7 @@ mod shell_tests { // Write inner hash in storage let inner_hash_key = replay_protection::last_key(&inner_tx_hash); shell - .wl_storage - .storage + .state .write_replay_protection_entry(&mut batch, &inner_hash_key) .expect("Test failed"); @@ -2804,14 +2645,14 @@ mod shell_tests { let (shell, _recv, _, _) = test_utils::setup(); let block_gas_limit = - parameters::get_max_block_gas(&shell.wl_storage).unwrap(); + parameters::get_max_block_gas(&shell.state).unwrap(); let keypair = super::test_utils::gen_keypair(); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(100.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2844,7 +2685,7 @@ mod shell_tests { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(100.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2872,7 +2713,7 @@ mod shell_tests { #[test] fn 
test_fee_non_whitelisted_token() { let (shell, _recv, _, _) = test_utils::setup(); - let apfel_denom = read_denom(&shell.wl_storage, &address::apfel()) + let apfel_denom = read_denom(&shell.state, &address::testing::apfel()) .expect("unable to read denomination from storage") .expect("unable to find denomination of apfels"); @@ -2883,7 +2724,7 @@ mod shell_tests { 100.into(), apfel_denom, ), - token: address::apfel(), + token: address::testing::apfel(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -2918,7 +2759,7 @@ mod shell_tests { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -2954,7 +2795,7 @@ mod shell_tests { amount_per_gas_unit: DenominatedAmount::native( 1_000_000_000.into(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -2990,7 +2831,7 @@ mod shell_tests { amount_per_gas_unit: DenominatedAmount::native( token::Amount::max(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -3023,7 +2864,7 @@ mod shell_tests { let max_tx_bytes: u32 = { let key = parameters::storage::get_max_tx_bytes_key(); shell - .wl_storage + .state .read(&key) .expect("Failed to read from storage") .expect("Max tx bytes should have been written to storage") @@ -3037,7 +2878,7 @@ mod shell_tests { amount_per_gas_unit: DenominatedAmount::native( 100.into(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), diff --git 
a/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs index cd3b55cf30..b08c2bec59 100644 --- a/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -1,17 +1,16 @@ //! Implementation of the [`RequestPrepareProposal`] ABCI++ method for the Shell use masp_primitives::transaction::Transaction; +use namada::core::address::Address; use namada::core::hints; +use namada::core::key::tm_raw_hash_to_string; use namada::gas::TxGasMeter; use namada::ledger::protocol; use namada::ledger::storage::tx_queue::TxInQueue; use namada::proof_of_stake::storage::find_validator_by_raw_hash; -use namada::state::{DBIter, StorageHasher, TempWlStorage, DB}; +use namada::state::{DBIter, StorageHasher, TempWlState, DB}; use namada::tx::data::{DecryptedTx, TxType, WrapperTx}; use namada::tx::Tx; -use namada::types::address::Address; -use namada::types::key::tm_raw_hash_to_string; -use namada::types::time::DateTimeUtc; use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::WasmCacheAccess; @@ -54,15 +53,13 @@ where // add encrypted txs let tm_raw_hash_string = tm_raw_hash_to_string(req.proposer_address); - let block_proposer = find_validator_by_raw_hash( - &self.wl_storage, - tm_raw_hash_string, - ) - .unwrap() - .expect( - "Unable to find native validator address of block proposer \ - from tendermint raw hash", - ); + let block_proposer = + find_validator_by_raw_hash(&self.state, tm_raw_hash_string) + .unwrap() + .expect( + "Unable to find native validator address of block \ + proposer from tendermint raw hash", + ); let (encrypted_txs, alloc) = self.build_encrypted_txs( alloc, &req.txs, @@ -113,16 +110,14 @@ where if hints::unlikely(is_2nd_height_off || is_3rd_height_off) { tracing::warn!( proposal_height = - ?self.wl_storage.storage.block.height, + ?self.state.in_mem().block.height, "No mempool txs are being included in the current proposal" ); 
EncryptedTxBatchAllocator::WithoutEncryptedTxs( - (&self.wl_storage).into(), + (&*self.state).into(), ) } else { - EncryptedTxBatchAllocator::WithEncryptedTxs( - (&self.wl_storage).into(), - ) + EncryptedTxBatchAllocator::WithEncryptedTxs((&*self.state).into()) } } @@ -141,20 +136,20 @@ where // valid because of mempool check TryInto::::try_into(block_time).ok() }); - let mut temp_wl_storage = TempWlStorage::new(&self.wl_storage.storage); + let mut temp_state = self.state.with_temp_write_log(); let mut vp_wasm_cache = self.vp_wasm_cache.clone(); let mut tx_wasm_cache = self.tx_wasm_cache.clone(); let txs = txs .iter() .filter_map(|tx_bytes| { - match validate_wrapper_bytes(tx_bytes, block_time, block_proposer, proposer_local_config, &mut temp_wl_storage, &mut vp_wasm_cache, &mut tx_wasm_cache, ) { + match validate_wrapper_bytes(tx_bytes, block_time, block_proposer, proposer_local_config, &mut temp_state, &mut vp_wasm_cache, &mut tx_wasm_cache, ) { Ok(gas) => { - temp_wl_storage.write_log.commit_tx(); + temp_state.write_log_mut().commit_tx(); Some((tx_bytes.to_owned(), gas)) }, Err(()) => { - temp_wl_storage.write_log.drop_tx(); + temp_state.write_log_mut().drop_tx(); None } } @@ -209,8 +204,8 @@ where mut alloc: BlockAllocator, ) -> (Vec, BlockAllocator) { let txs = self - .wl_storage - .storage + .state + .in_mem() .tx_queue .iter() .map( @@ -263,7 +258,7 @@ where mut alloc: BlockAllocator, txs: &[TxBytes], ) -> Vec { - if self.wl_storage.storage.last_block.is_none() { + if self.state.in_mem().last_block.is_none() { // genesis should not contain vote extensions. 
// // this is because we have not decided any block through @@ -323,7 +318,7 @@ fn validate_wrapper_bytes( block_time: Option, block_proposer: &Address, proposer_local_config: Option<&ValidatorLocalConfig>, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result @@ -351,8 +346,7 @@ where let mut tx_gas_meter = TxGasMeter::new(wrapper.gas_limit); tx_gas_meter.add_wrapper_gas(tx_bytes).map_err(|_| ())?; - super::replay_protection_checks(&tx, temp_wl_storage) - .map_err(|_| ())?; + super::replay_protection_checks(&tx, temp_state).map_err(|_| ())?; // Check fees and extract the gas limit of this transaction match prepare_proposal_fee_check( @@ -360,7 +354,7 @@ where protocol::get_fee_unshielding_transaction(&tx, &wrapper), block_proposer, proposer_local_config, - temp_wl_storage, + temp_state, vp_wasm_cache, tx_wasm_cache, ) { @@ -377,7 +371,7 @@ fn prepare_proposal_fee_check( masp_transaction: Option, proposer: &Address, proposer_local_config: Option<&ValidatorLocalConfig>, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result<(), Error> @@ -400,7 +394,7 @@ where ))))? 
.to_owned(), None => namada::ledger::parameters::read_gas_cost( - temp_wl_storage, + temp_state, &wrapper.fee.token, ) .expect("Must be able to read gas cost parameter") @@ -415,12 +409,12 @@ where wrapper, masp_transaction, minimum_gas_price, - temp_wl_storage, + temp_state, vp_wasm_cache, tx_wasm_cache, )?; - protocol::transfer_fee(temp_wl_storage, proposer, wrapper) + protocol::transfer_fee(temp_state, proposer, wrapper) .map_err(Error::TxApply) } @@ -431,9 +425,12 @@ mod test_prepare_proposal { use std::collections::BTreeSet; use borsh_ext::BorshSerializeExt; + use namada::core::address; + use namada::core::ethereum_events::EthereumEvent; + use namada::core::key::RefTo; + use namada::core::storage::{BlockHeight, InnerEthEventsQueue}; use namada::ledger::gas::Gas; use namada::ledger::pos::PosQueries; - use namada::ledger::replay_protection; use namada::proof_of_stake::storage::{ consensus_validator_set_handle, read_consensus_validator_set_addresses_with_stake, @@ -441,18 +438,14 @@ mod test_prepare_proposal { use namada::proof_of_stake::types::WeightedValidator; use namada::proof_of_stake::Epoch; use namada::state::collections::lazy_map::{NestedSubKey, SubKey}; - use namada::token; use namada::token::{read_denom, Amount, DenominatedAmount}; - use namada::tx::data::{Fee, TxType, WrapperTx}; + use namada::tx::data::Fee; use namada::tx::{Code, Data, Header, Section, Signature, Signed}; - use namada::types::address::{self, Address}; - use namada::types::ethereum_events::EthereumEvent; - use namada::types::key::RefTo; - use namada::types::storage::{BlockHeight, InnerEthEventsQueue}; use namada::vote_ext::{ethereum_events, ethereum_tx_data_variants}; + use namada::{replay_protection, token}; + use namada_sdk::storage::StorageWrite; use super::*; - use crate::config::ValidatorLocalConfig; use crate::node::ledger::shell::test_utils::{ self, gen_keypair, get_pkh_from_address, TestShell, }; @@ -506,7 +499,7 @@ mod test_prepare_proposal { amount_per_gas_unit: 
DenominatedAmount::native( Default::default(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -584,10 +577,7 @@ mod test_prepare_proposal { } let (shell, _recv, _, _) = test_utils::setup_at_height(LAST_HEIGHT); - assert_eq!( - shell.wl_storage.storage.get_last_block_height(), - LAST_HEIGHT - ); + assert_eq!(shell.state.in_mem().get_last_block_height(), LAST_HEIGHT); check_invalid(&shell, LAST_HEIGHT + 2); check_invalid(&shell, LAST_HEIGHT + 1); @@ -638,20 +628,20 @@ mod test_prepare_proposal { ..Default::default() }); - let params = shell.wl_storage.pos_queries().get_pos_params(); + let params = shell.state.pos_queries().get_pos_params(); // artificially change the voting power of the default validator to // one, change the block height, and commit a dummy block, // to move to a new epoch let events_epoch = shell - .wl_storage + .state .pos_queries() .get_epoch(FIRST_HEIGHT) .expect("Test failed"); let validators_handle = consensus_validator_set_handle().at(&events_epoch); let consensus_in_mem = validators_handle - .iter(&shell.wl_storage) + .iter(&shell.state) .expect("Test failed") .map(|val| { let ( @@ -667,7 +657,7 @@ mod test_prepare_proposal { let mut consensus_set: BTreeSet = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() @@ -676,13 +666,13 @@ mod test_prepare_proposal { let val1 = consensus_set.pop_first().unwrap(); let val2 = consensus_set.pop_first().unwrap(); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.address.clone(), Epoch::default(), ); let pkh2 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val2.address.clone(), Epoch::default(), @@ -692,11 +682,11 @@ mod test_prepare_proposal { if address == wallet::defaults::validator_address() { validators_handle .at(&val_stake) - .remove(&mut shell.wl_storage, &val_position) + .remove(&mut 
shell.state, &val_position) .expect("Test failed"); validators_handle .at(&1.into()) - .insert(&mut shell.wl_storage, val_position, address) + .insert(&mut shell.state, val_position, address) .expect("Test failed"); } } @@ -727,7 +717,7 @@ mod test_prepare_proposal { shell.start_new_epoch(Some(req)); assert_eq!( shell - .wl_storage + .state .pos_queries() .get_epoch(shell.get_current_decision_height()), Some(Epoch(1)) @@ -786,13 +776,15 @@ mod test_prepare_proposal { // Load some tokens to tx signer to pay fees let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&keypair.ref_to()), ); shell - .wl_storage - .storage - .write(&balance_key, Amount::native_whole(1_000).serialize_to_vec()) + .state + .db_write( + &balance_key, + Amount::native_whole(1_000).serialize_to_vec(), + ) .unwrap(); let mut req = RequestPrepareProposal { @@ -808,7 +800,7 @@ mod test_prepare_proposal { amount_per_gas_unit: DenominatedAmount::native( 1.into(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -842,7 +834,7 @@ mod test_prepare_proposal { // fail the test let expected_txs: Vec
= expected_wrapper .into_iter() - .chain(expected_decrypted.into_iter()) + .chain(expected_decrypted) .map(|tx| tx.header) .collect(); let received: Vec
= shell @@ -877,7 +869,7 @@ mod test_prepare_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -897,9 +889,8 @@ mod test_prepare_proposal { let wrapper_unsigned_hash = wrapper.header_hash(); let hash_key = replay_protection::last_key(&wrapper_unsigned_hash); shell - .wl_storage - .storage - .write(&hash_key, vec![]) + .state + .write_bytes(&hash_key, vec![]) .expect("Test failed"); let req = RequestPrepareProposal { @@ -922,7 +913,7 @@ mod test_prepare_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -959,7 +950,7 @@ mod test_prepare_proposal { amount_per_gas_unit: DenominatedAmount::native( Amount::zero(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -979,9 +970,8 @@ mod test_prepare_proposal { // Write inner hash to storage let hash_key = replay_protection::last_key(&inner_unsigned_hash); shell - .wl_storage - .storage - .write(&hash_key, vec![]) + .state + .write_bytes(&hash_key, vec![]) .expect("Test failed"); let req = RequestPrepareProposal { @@ -1005,7 +995,7 @@ mod test_prepare_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1027,7 +1017,7 @@ mod test_prepare_proposal { new_wrapper.update_header(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - 
token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair_2.ref_to(), Epoch(0), @@ -1057,7 +1047,7 @@ mod test_prepare_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1099,13 +1089,13 @@ mod test_prepare_proposal { let (shell, _recv, _, _) = test_utils::setup(); let block_gas_limit = - namada::parameters::get_max_block_gas(&shell.wl_storage).unwrap(); + namada::parameters::get_max_block_gas(&shell.state).unwrap(); let keypair = gen_keypair(); let wrapper = WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(100.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1144,7 +1134,7 @@ mod test_prepare_proposal { let wrapper = WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(100.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1184,13 +1174,13 @@ mod test_prepare_proposal { // Remove the allowed btc *local_config = Some(ValidatorLocalConfig { accepted_gas_tokens: std::collections::HashMap::from([( - namada::types::address::nam(), + namada::core::address::testing::nam(), Amount::from(1), )]), }); } - let btc_denom = read_denom(&shell.wl_storage, &address::btc()) + let btc_denom = read_denom(&shell.state, &address::testing::btc()) .expect("unable to read denomination from storage") .expect("unable to find denomination of btcs"); @@ -1200,7 +1190,7 @@ mod test_prepare_proposal { 100.into(), btc_denom, ), - token: address::btc(), + token: address::testing::btc(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -1238,7 +1228,7 @@ mod 
test_prepare_proposal { fn test_fee_non_whitelisted_token() { let (shell, _recv, _, _) = test_utils::setup(); - let apfel_denom = read_denom(&shell.wl_storage, &address::apfel()) + let apfel_denom = read_denom(&shell.state, &address::testing::apfel()) .expect("unable to read denomination from storage") .expect("unable to find denomination of apfels"); @@ -1248,7 +1238,7 @@ mod test_prepare_proposal { 100.into(), apfel_denom, ), - token: address::apfel(), + token: address::testing::apfel(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -1290,7 +1280,7 @@ mod test_prepare_proposal { // Remove btc and increase minimum for nam *local_config = Some(ValidatorLocalConfig { accepted_gas_tokens: std::collections::HashMap::from([( - namada::types::address::nam(), + namada::core::address::testing::nam(), Amount::from(100), )]), }); @@ -1299,7 +1289,7 @@ mod test_prepare_proposal { let wrapper = WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(10.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -1339,7 +1329,7 @@ mod test_prepare_proposal { let wrapper = WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -1380,7 +1370,7 @@ mod test_prepare_proposal { amount_per_gas_unit: DenominatedAmount::native( 1_000_000_000.into(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -1421,7 +1411,7 @@ mod test_prepare_proposal { amount_per_gas_unit: DenominatedAmount::native( token::Amount::max(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: 
shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -1460,8 +1450,8 @@ mod test_prepare_proposal { let (mut shell, _recv, _, _) = test_utils::setup_at_height(LAST_HEIGHT); shell - .wl_storage - .storage + .state + .in_mem_mut() .eth_events_queue // sent transfers to namada nonce to 5 .transfers_to_namada = InnerEthEventsQueue::new_at(5.into()); diff --git a/crates/apps/src/lib/node/ledger/shell/process_proposal.rs b/crates/apps/src/lib/node/ledger/shell/process_proposal.rs index db3cdb926c..73648c7207 100644 --- a/crates/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/crates/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -2,16 +2,8 @@ //! and [`RevertProposal`] ABCI++ methods for the Shell use data_encoding::HEXUPPER; -use namada::core::hints; -use namada::ethereum_bridge::protocol::validation::bridge_pool_roots::validate_bp_roots_vext; -use namada::ethereum_bridge::protocol::validation::ethereum_events::validate_eth_events_vext; -use namada::ethereum_bridge::protocol::validation::validator_set_update::validate_valset_upd_vext; use namada::ledger::pos::PosQueries; -use namada::ledger::protocol::get_fee_unshielding_transaction; -use namada::ledger::storage::tx_queue::TxInQueue; -use namada::parameters::validate_tx_bytes; use namada::proof_of_stake::storage::find_validator_by_raw_hash; -use namada::state::{TempWlStorage, WlStorage}; use namada::tx::data::protocol::ProtocolTxType; use namada::vote_ext::ethereum_tx_data_variants; @@ -41,16 +33,16 @@ pub struct ValidationMeta { pub has_decrypted_txs: bool, } -impl From<&WlStorage> for ValidationMeta +impl From<&WlState> for ValidationMeta where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, { - fn from(wl_storage: &WlStorage) -> Self { + fn from(state: &WlState) -> Self { let max_proposal_bytes = - wl_storage.pos_queries().get_max_proposal_bytes().get(); + state.pos_queries().get_max_proposal_bytes().get(); let 
max_block_gas = - namada::parameters::get_max_block_gas(wl_storage).unwrap(); + namada::parameters::get_max_block_gas(state).unwrap(); let encrypted_txs_bin = EncryptedTxsBins::new(max_proposal_bytes, max_block_gas); let txs_bin = TxBin::init(max_proposal_bytes); @@ -94,7 +86,7 @@ where let native_block_proposer_address = { let tm_raw_hash_string = tm_raw_hash_to_string(&req.proposer_address); - find_validator_by_raw_hash(&self.wl_storage, tm_raw_hash_string) + find_validator_by_raw_hash(&self.state, tm_raw_hash_string) .unwrap() .expect( "Unable to find native validator address of block \ @@ -166,9 +158,9 @@ where block_time: DateTimeUtc, block_proposer: &Address, ) -> (Vec, ValidationMeta) { - let mut tx_queue_iter = self.wl_storage.storage.tx_queue.iter(); - let mut temp_wl_storage = TempWlStorage::new(&self.wl_storage.storage); - let mut metadata = ValidationMeta::from(&self.wl_storage); + let mut tx_queue_iter = self.state.in_mem().tx_queue.iter(); + let mut temp_state = self.state.with_temp_write_log(); + let mut metadata = ValidationMeta::from(self.state.read_only()); let mut vp_wasm_cache = self.vp_wasm_cache.clone(); let mut tx_wasm_cache = self.tx_wasm_cache.clone(); @@ -179,7 +171,7 @@ where tx_bytes, &mut tx_queue_iter, &mut metadata, - &mut temp_wl_storage, + &mut temp_state, block_time, &mut vp_wasm_cache, &mut tx_wasm_cache, @@ -187,7 +179,7 @@ where ); let error_code = ResultCode::from_u32(result.code).unwrap(); if let ResultCode::Ok = error_code { - temp_wl_storage.write_log.commit_tx(); + temp_state.write_log_mut().commit_tx(); } else { tracing::info!( "Process proposal rejected an invalid tx. 
Error code: \ @@ -195,13 +187,13 @@ where error_code, result.info ); - temp_wl_storage.write_log.drop_tx(); + temp_state.write_log_mut().drop_tx(); } result }) .collect(); metadata.decrypted_queue_has_remaining_txs = - !self.wl_storage.storage.tx_queue.is_empty() + !self.state.in_mem().tx_queue.is_empty() && tx_queue_iter.next().is_some(); (tx_results, metadata) } @@ -234,7 +226,7 @@ where tx_bytes: &[u8], tx_queue_iter: &mut impl Iterator, metadata: &mut ValidationMeta, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, block_time: DateTimeUtc, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, @@ -247,7 +239,7 @@ where // // NB: always keep this as the first tx check, // as it is a pretty cheap one - if !validate_tx_bytes(&self.wl_storage, tx_bytes.len()) + if !validate_tx_bytes(&self.state, tx_bytes.len()) .expect("Failed to get max tx bytes param from storage") { return TxResult { @@ -350,11 +342,9 @@ where .map_err(|err| err.to_string()) .and_then(|ext| { validate_eth_events_vext( - &self.wl_storage, + &self.state, &ext.0, - self.wl_storage - .storage - .get_last_block_height(), + self.state.in_mem().get_last_block_height(), ) .map(|_| TxResult { code: ResultCode::Ok.into(), @@ -378,11 +368,9 @@ where .map_err(|err| err.to_string()) .and_then(|ext| { validate_bp_roots_vext( - &self.wl_storage, + &self.state, &ext.0, - self.wl_storage - .storage - .get_last_block_height(), + self.state.in_mem().get_last_block_height(), ) .map(|_| TxResult { code: ResultCode::Ok.into(), @@ -408,14 +396,14 @@ where .map_err(|err| err.to_string()) .and_then(|ext| { validate_valset_upd_vext( - &self.wl_storage, + &self.state, &ext, // n.b. 
only accept validator set updates // issued at // the current epoch (signing off on the // validators // of the next epoch) - self.wl_storage.storage.get_current_epoch().0, + self.state.in_mem().get_current_epoch().0, ) .map(|_| TxResult { code: ResultCode::Ok.into(), @@ -579,8 +567,7 @@ where } // Replay protection checks - if let Err(e) = - super::replay_protection_checks(&tx, temp_wl_storage) + if let Err(e) = super::replay_protection_checks(&tx, temp_state) { return TxResult { code: ResultCode::ReplayTx.into(), @@ -593,7 +580,7 @@ where &wrapper, get_fee_unshielding_transaction(&tx, &wrapper), block_proposer, - temp_wl_storage, + temp_state, vp_wasm_cache, tx_wasm_cache, ) { @@ -631,7 +618,7 @@ fn process_proposal_fee_check( wrapper: &WrapperTx, masp_transaction: Option, proposer: &Address, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result<()> @@ -641,7 +628,7 @@ where CA: 'static + WasmCacheAccess + Sync, { let minimum_gas_price = namada::ledger::parameters::read_gas_cost( - temp_wl_storage, + temp_state, &wrapper.fee.token, ) .expect("Must be able to read gas cost parameter") @@ -654,12 +641,12 @@ where wrapper, masp_transaction, minimum_gas_price, - temp_wl_storage, + temp_state, vp_wasm_cache, tx_wasm_cache, )?; - protocol::transfer_fee(temp_wl_storage, proposer, wrapper) + protocol::transfer_fee(temp_state, proposer, wrapper) .map_err(Error::TxApply) } @@ -667,26 +654,19 @@ where /// are covered by the e2e tests. 
#[cfg(test)] mod test_process_proposal { - use namada::ledger::replay_protection; + use namada::core::key::*; + use namada::core::storage::Epoch; + use namada::replay_protection; use namada::state::StorageWrite; - use namada::token; use namada::token::{read_denom, Amount, DenominatedAmount}; - use namada::tx::data::{Fee, WrapperTx}; - use namada::tx::{ - Code, Data, Section, SignableEthMessage, Signature, Signed, - }; - use namada::types::ethereum_events::EthereumEvent; - use namada::types::key::*; - use namada::types::storage::Epoch; - use namada::types::time::DateTimeUtc; - use namada::vote_ext::{ - bridge_pool_roots, ethereum_events, EthereumTxData, - }; + use namada::tx::data::Fee; + use namada::tx::{Code, Data, Signature, Signed}; + use namada::vote_ext::{bridge_pool_roots, ethereum_events}; use super::*; use crate::node::ledger::shell::test_utils::{ - self, deactivate_bridge, gen_keypair, get_bp_bytes_to_sign, - ProcessProposal, TestError, TestShell, + deactivate_bridge, gen_keypair, get_bp_bytes_to_sign, ProcessProposal, + TestError, TestShell, }; use crate::node::ledger::shims::abcipp_shim_types::shim::request::ProcessedTx; use crate::wallet; @@ -706,7 +686,7 @@ mod test_process_proposal { }; let ext = ethereum_events::Vext { validator_addr: addr.clone(), - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), ethereum_events: vec![event], } .sign(protocol_key); @@ -744,8 +724,8 @@ mod test_process_proposal { #[test] fn check_rejected_bp_roots_bridge_inactive() { let (mut shell, _a, _b, _c) = test_utils::setup_at_height(1); - shell.wl_storage.storage.block.height = - shell.wl_storage.storage.get_last_block_height(); + shell.state.in_mem_mut().block.height = + shell.state.in_mem().get_last_block_height(); shell.commit(); let protocol_key = shell.mode.get_protocol_key().expect("Test failed"); let addr = shell.mode.get_validator_address().expect("Test failed"); @@ -756,7 +736,7 @@ mod 
test_process_proposal { ) .sig; let vote_ext = bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: addr.clone(), sig, } @@ -916,7 +896,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Default::default(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, public_key, Epoch(0), @@ -968,7 +948,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Amount::from_uint(100, 0).expect("Test failed"), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1028,11 +1008,11 @@ mod test_process_proposal { let keypair = gen_keypair(); // reduce address balance to match the 100 token min fee let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&keypair.ref_to()), ); shell - .wl_storage + .state .write(&balance_key, Amount::native_whole(99)) .unwrap(); let keypair = gen_keypair(); @@ -1042,7 +1022,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Amount::from_uint(1, 0).expect("Test failed"), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1095,11 +1075,11 @@ mod test_process_proposal { let keypair = crate::wallet::defaults::daewon_keypair(); // reduce address balance to match the 100 token min fee let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&keypair.ref_to()), ); shell - .wl_storage + .state .write(&balance_key, Amount::native_whole(99)) .unwrap(); shell.commit(); @@ -1110,7 +1090,7 @@ mod test_process_proposal { amount_per_gas_unit: 
DenominatedAmount::native( Amount::native_whole(1_000_100), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1168,7 +1148,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Amount::native_whole(i as u64), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1229,7 +1209,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Default::default(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1284,7 +1264,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Default::default(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, pk: keypair.ref_to(), epoch: Epoch(0), @@ -1390,7 +1370,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Amount::zero(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1407,12 +1387,11 @@ mod test_process_proposal { ))); // Write wrapper hash to storage - let mut batch = namada::state::testing::TestStorage::batch(); + let mut batch = namada::state::testing::TestState::batch(); let wrapper_unsigned_hash = wrapper.header_hash(); let hash_key = replay_protection::last_key(&wrapper_unsigned_hash); shell - .wl_storage - .storage + .state .write_replay_protection_entry(&mut batch, &hash_key) .expect("Test failed"); @@ -1449,20 +1428,19 @@ mod test_process_proposal { // Add unshielded balance for fee payment let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&keypair.ref_to()), ); shell - .wl_storage - 
.storage - .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) + .state + .write(&balance_key, Amount::native_whole(1000)) .unwrap(); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1516,7 +1494,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Amount::zero(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1533,11 +1511,10 @@ mod test_process_proposal { ))); // Write inner hash to storage - let mut batch = namada::state::testing::TestStorage::batch(); + let mut batch = namada::state::testing::TestState::batch(); let hash_key = replay_protection::last_key(&wrapper.raw_header_hash()); shell - .wl_storage - .storage + .state .write_replay_protection_entry(&mut batch, &hash_key) .expect("Test failed"); @@ -1577,7 +1554,7 @@ mod test_process_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1597,7 +1574,7 @@ mod test_process_proposal { new_wrapper.update_header(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair_2.ref_to(), Epoch(0), @@ -1633,7 +1610,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Amount::zero(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1695,7 +1672,7 @@ mod 
test_process_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1738,7 +1715,7 @@ mod test_process_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1783,14 +1760,14 @@ mod test_process_proposal { let (shell, _recv, _, _) = test_utils::setup(); let block_gas_limit = - namada::parameters::get_max_block_gas(&shell.wl_storage).unwrap(); + namada::parameters::get_max_block_gas(&shell.state).unwrap(); let keypair = super::test_utils::gen_keypair(); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(100.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1832,7 +1809,7 @@ mod test_process_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(100.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1869,7 +1846,7 @@ mod test_process_proposal { fn test_fee_non_whitelisted_token() { let (shell, _recv, _, _) = test_utils::setup(); - let apfel_denom = read_denom(&shell.wl_storage, &address::apfel()) + let apfel_denom = read_denom(&shell.state, &address::testing::apfel()) .expect("unable to read denomination from storage") .expect("unable to find denomination of apfels"); @@ -1880,7 +1857,7 @@ mod test_process_proposal { 100.into(), apfel_denom, ), - token: address::apfel(), + token: 
address::testing::apfel(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -1923,7 +1900,7 @@ mod test_process_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -1968,7 +1945,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( 1_000_000_000.into(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -2013,7 +1990,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( token::Amount::max(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -2059,7 +2036,7 @@ mod test_process_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2076,7 +2053,7 @@ mod test_process_proposal { ))); let wrapper = wrapper.to_bytes(); for height in [1u64, 2] { - if let Some(b) = shell.wl_storage.storage.last_block.as_mut() { + if let Some(b) = shell.state.in_mem_mut().last_block.as_mut() { b.height = height.into(); } let response = { @@ -2114,7 +2091,7 @@ mod test_process_proposal { let max_tx_bytes: u32 = { let key = get_max_tx_bytes_key(); shell - .wl_storage + .state .read(&key) .expect("Failed to read from storage") .expect("Max tx bytes should have been written to storage") @@ -2128,7 +2105,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( 100.into(), ), - token: 
shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2177,14 +2154,14 @@ mod test_process_proposal { /// not validated by `ProcessProposal`. #[test] fn test_outdated_nonce_process_proposal() { - use namada::types::storage::InnerEthEventsQueue; + use namada::core::storage::InnerEthEventsQueue; const LAST_HEIGHT: BlockHeight = BlockHeight(3); let (mut shell, _recv, _, _) = test_utils::setup_at_height(LAST_HEIGHT); shell - .wl_storage - .storage + .state + .in_mem_mut() .eth_events_queue // sent transfers to namada nonce to 5 .transfers_to_namada = InnerEthEventsQueue::new_at(5.into()); diff --git a/crates/apps/src/lib/node/ledger/shell/queries.rs b/crates/apps/src/lib/node/ledger/shell/queries.rs index 721fd14621..fd1ad1f221 100644 --- a/crates/apps/src/lib/node/ledger/shell/queries.rs +++ b/crates/apps/src/lib/node/ledger/shell/queries.rs @@ -2,11 +2,8 @@ use namada::ledger::dry_run_tx; use namada::ledger::queries::{RequestCtx, ResponseQuery}; -use namada::token; -use namada::types::address::Address; use super::*; -use crate::node::ledger::response; impl Shell where @@ -19,7 +16,7 @@ where /// INVARIANT: This method must be stateless. 
pub fn query(&self, query: request::Query) -> response::Query { let ctx = RequestCtx { - wl_storage: &self.wl_storage, + state: self.state.read_only(), event_log: self.event_log(), vp_wasm_cache: self.vp_wasm_cache.read_only(), tx_wasm_cache: self.tx_wasm_cache.read_only(), @@ -56,7 +53,7 @@ where ) -> token::Amount { // Storage read must not fail, but there might be no value, in which // case default (0) is returned - token::read_balance(&self.wl_storage, token, owner) + token::read_balance(&self.state, token, owner) .expect("Token balance read in the protocol must not fail") } } @@ -66,16 +63,14 @@ where // access to the `Shell` there #[cfg(test)] mod test_queries { + use namada::core::storage::{BlockHash, Epoch}; use namada::ledger::pos::PosQueries; use namada::proof_of_stake::storage::read_consensus_validator_set_addresses_with_stake; use namada::proof_of_stake::types::WeightedValidator; - use namada::state::EPOCH_SWITCH_BLOCKS_DELAY; use namada::tendermint::abci::types::VoteInfo; - use namada::types::storage::{BlockHash, Epoch}; - use namada_sdk::eth_bridge::{EthBridgeQueries, SendValsetUpd}; + use namada_sdk::eth_bridge::SendValsetUpd; use super::*; - use crate::node::ledger::shell::test_utils; use crate::node::ledger::shell::test_utils::get_pkh_from_address; use crate::node::ledger::shims::abcipp_shim_types::shim::request::FinalizeBlock; @@ -99,7 +94,7 @@ mod test_queries { for (curr_epoch, curr_block_height, can_send) in epoch_assertions { - shell.wl_storage.storage.begin_block( + shell.state.in_mem_mut().begin_block( BlockHash::default(), curr_block_height.into()).unwrap(); if prev_epoch != Some(curr_epoch) { @@ -107,7 +102,7 @@ mod test_queries { shell.start_new_epoch_in(EPOCH_NUM_BLOCKS); } if let Some(b) = - shell.wl_storage.storage.last_block.as_mut() + shell.state.in_mem_mut().last_block.as_mut() { b.height = BlockHeight(curr_block_height - 1); } @@ -119,23 +114,23 @@ mod test_queries { ); assert_eq!( shell - .wl_storage + .state .pos_queries() 
.get_epoch(curr_block_height.into()), Some(Epoch(curr_epoch)) ); assert_eq!( shell - .wl_storage + .state .ethbridge_queries() .must_send_valset_upd(SendValsetUpd::Now), can_send, ); let params = - shell.wl_storage.pos_queries().get_pos_params(); + shell.state.pos_queries().get_pos_params(); let consensus_set: Vec = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() @@ -144,7 +139,7 @@ mod test_queries { let val1 = consensus_set[0].clone(); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.address.clone(), Epoch::default(), diff --git a/crates/apps/src/lib/node/ledger/shell/testing/client.rs b/crates/apps/src/lib/node/ledger/shell/testing/client.rs index 397473453b..5f1df9be2b 100644 --- a/crates/apps/src/lib/node/ledger/shell/testing/client.rs +++ b/crates/apps/src/lib/node/ledger/shell/testing/client.rs @@ -1,6 +1,6 @@ use clap::Command as App; use eyre::Report; -use namada::types::io::Io; +use namada::io::Io; use namada_sdk::error::Error as SdkError; use super::node::MockNode; diff --git a/crates/apps/src/lib/node/ledger/shell/testing/node.rs b/crates/apps/src/lib/node/ledger/shell/testing/node.rs index ceb4fe1637..f133082f8e 100644 --- a/crates/apps/src/lib/node/ledger/shell/testing/node.rs +++ b/crates/apps/src/lib/node/ledger/shell/testing/node.rs @@ -10,6 +10,13 @@ use color_eyre::eyre::{Report, Result}; use data_encoding::HEXUPPER; use itertools::Either; use lazy_static::lazy_static; +use namada::control_flow::time::Duration; +use namada::core::ethereum_events::EthereumEvent; +use namada::core::ethereum_structs; +use namada::core::hash::Hash; +use namada::core::key::tm_consensus_key_raw_hash; +use namada::core::storage::{BlockHash, BlockHeight, Epoch, Header}; +use namada::core::time::DateTimeUtc; use namada::eth_bridge::oracle::config::Config as OracleConfig; use namada::ledger::dry_run_tx; use namada::ledger::events::log::dumb_queries; @@ -25,26 +32,19 @@ use 
namada::proof_of_stake::types::WeightedValidator; use namada::state::{LastBlock, Sha256Hasher, EPOCH_SWITCH_BLOCKS_DELAY}; use namada::tendermint::abci::response::Info; use namada::tendermint::abci::types::VoteInfo; -use namada::tendermint_rpc::SimpleRequest; -use namada::types::control_flow::time::Duration; -use namada::types::ethereum_events::EthereumEvent; -use namada::types::ethereum_structs; -use namada::types::hash::Hash; -use namada::types::key::tm_consensus_key_raw_hash; -use namada::types::storage::{BlockHash, BlockHeight, Epoch, Header}; -use namada::types::time::DateTimeUtc; use namada_sdk::queries::Client; use namada_sdk::tendermint_proto::google::protobuf::Timestamp; use namada_sdk::tx::data::ResultCode; use regex::Regex; use tendermint_rpc::endpoint::block; +use tendermint_rpc::SimpleRequest; use tokio::sync::mpsc; +use crate::facade::tendermint; use crate::facade::tendermint_proto::v0_37::abci::{ RequestPrepareProposal, RequestProcessProposal, }; use crate::facade::tendermint_rpc::error::Error as RpcError; -use crate::facade::{tendermint, tendermint_rpc}; use crate::node::ledger::ethereum_oracle::test_tools::mock_web3_client::{ TestOracle, Web3Client, Web3Controller, }; @@ -319,7 +319,7 @@ impl MockNode { } pub fn current_epoch(&self) -> Epoch { - self.shell.lock().unwrap().wl_storage.storage.last_epoch + self.shell.lock().unwrap().state.in_mem().last_epoch } pub fn next_epoch(&mut self) -> Epoch { @@ -327,15 +327,15 @@ impl MockNode { let mut locked = self.shell.lock().unwrap(); let next_epoch_height = - locked.wl_storage.storage.get_last_block_height() + 1; - locked.wl_storage.storage.next_epoch_min_start_height = + locked.state.in_mem().get_last_block_height() + 1; + locked.state.in_mem_mut().next_epoch_min_start_height = next_epoch_height; - locked.wl_storage.storage.next_epoch_min_start_time = + locked.state.in_mem_mut().next_epoch_min_start_time = DateTimeUtc::now(); let next_epoch_min_start_height = - 
locked.wl_storage.storage.next_epoch_min_start_height; + locked.state.in_mem().next_epoch_min_start_height; if let Some(LastBlock { height, .. }) = - locked.wl_storage.storage.last_block.as_mut() + locked.state.in_mem_mut().last_block.as_mut() { *height = next_epoch_min_start_height; } @@ -348,8 +348,8 @@ impl MockNode { self.shell .lock() .unwrap() - .wl_storage - .storage + .state + .in_mem() .get_current_epoch() .0 } @@ -358,11 +358,11 @@ impl MockNode { fn prepare_request(&self) -> (Vec, Vec) { let (val1, ck) = { let locked = self.shell.lock().unwrap(); - let params = locked.wl_storage.pos_queries().get_pos_params(); - let current_epoch = locked.wl_storage.storage.get_current_epoch().0; + let params = locked.state.pos_queries().get_pos_params(); + let current_epoch = locked.state.in_mem().get_current_epoch().0; let consensus_set: Vec = read_consensus_validator_set_addresses_with_stake( - &locked.wl_storage, + &locked.state, current_epoch, ) .unwrap() @@ -371,7 +371,7 @@ impl MockNode { let val1 = consensus_set[0].clone(); let ck = validator_consensus_key_handle(&val1.address) - .get(&locked.wl_storage, current_epoch, ¶ms) + .get(&locked.state, current_epoch, ¶ms) .unwrap() .unwrap(); (val1, ck) @@ -399,11 +399,8 @@ impl MockNode { let (proposer_address, votes) = self.prepare_request(); let mut locked = self.shell.lock().unwrap(); - let height = locked - .wl_storage - .storage - .get_last_block_height() - .next_height(); + let height = + locked.state.in_mem().get_last_block_height().next_height(); // check if we have protocol txs to be included // in the finalize block request @@ -537,11 +534,8 @@ impl MockNode { ..Default::default() }; let mut locked = self.shell.lock().unwrap(); - let height = locked - .wl_storage - .storage - .get_last_block_height() - .next_height(); + let height = + locked.state.in_mem().get_last_block_height().next_height(); let (result, tx_results) = locked.process_proposal(req); let mut errors: Vec<_> = tx_results @@ -571,7 +565,7 @@ 
impl MockNode { txs: txs .clone() .into_iter() - .zip(tx_results.into_iter()) + .zip(tx_results) .map(|(tx, result)| ProcessedTx { tx: tx.into(), result, @@ -691,8 +685,8 @@ impl<'a> Client for &'a MockNode { self.shell .lock() .unwrap() - .wl_storage - .storage + .state + .in_mem() .last_block .as_ref() .map(|b| b.height) @@ -709,7 +703,7 @@ impl<'a> Client for &'a MockNode { }; let borrowed = self.shell.lock().unwrap(); let ctx = RequestCtx { - wl_storage: &borrowed.wl_storage, + state: &borrowed.state, event_log: borrowed.event_log(), vp_wasm_cache: borrowed.vp_wasm_cache.read_only(), tx_wasm_cache: borrowed.tx_wasm_cache.read_only(), @@ -742,16 +736,16 @@ impl<'a> Client for &'a MockNode { version: "test".to_string(), app_version: 0, last_block_height: locked - .wl_storage - .storage + .state + .in_mem() .last_block .as_ref() .map(|b| b.height.0 as u32) .unwrap_or_default() .into(), last_block_app_hash: locked - .wl_storage - .storage + .state + .in_mem() .last_block .as_ref() .map(|b| b.hash.0) @@ -835,7 +829,7 @@ impl<'a> Client for &'a MockNode { block: 0, app: 0, }), - chain_id: "Namada".try_into().unwrap(), + chain_id: "Namada".into(), height: encoded_event.0 as i64, time: None, last_block_id: None, diff --git a/crates/apps/src/lib/node/ledger/shell/testing/utils.rs b/crates/apps/src/lib/node/ledger/shell/testing/utils.rs index 451e20c2df..c742559b84 100644 --- a/crates/apps/src/lib/node/ledger/shell/testing/utils.rs +++ b/crates/apps/src/lib/node/ledger/shell/testing/utils.rs @@ -4,7 +4,7 @@ use std::pin::Pin; use std::task::{Context, Poll}; use lazy_static::lazy_static; -use namada::types::io::{prompt_aux, read_aux, Io}; +use namada::io::{prompt_aux, read_aux, Io}; use tempfile::tempdir; use tokio::io::{AsyncRead, ReadBuf}; diff --git a/crates/apps/src/lib/node/ledger/shell/utils.rs b/crates/apps/src/lib/node/ledger/shell/utils.rs index 3a9142cdb8..e34009956d 100644 --- a/crates/apps/src/lib/node/ledger/shell/utils.rs +++ 
b/crates/apps/src/lib/node/ledger/shell/utils.rs @@ -1,6 +1,6 @@ use borsh::BorshDeserialize; +use namada::core::storage::Key; use namada::state::{self, StorageRead}; -use namada::types::storage::Key; pub(super) fn force_read( storage: &S, diff --git a/crates/apps/src/lib/node/ledger/shell/vote_extensions.rs b/crates/apps/src/lib/node/ledger/shell/vote_extensions.rs index 2f0454bbb4..9bf9f9bd1b 100644 --- a/crates/apps/src/lib/node/ledger/shell/vote_extensions.rs +++ b/crates/apps/src/lib/node/ledger/shell/vote_extensions.rs @@ -10,10 +10,8 @@ use namada::ethereum_bridge::protocol::transactions::validator_set_update::sign_ pub use namada::ethereum_bridge::protocol::validation::VoteExtensionError; use namada::tx::Signed; use namada::vote_ext::{ - bridge_pool_roots, ethereum_events, validator_set_update, EthereumTxData, - VoteExtension, + bridge_pool_roots, ethereum_events, validator_set_update, VoteExtension, }; -use namada_sdk::eth_bridge::EthBridgeQueries; use super::*; use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes; @@ -64,7 +62,7 @@ where _ => unreachable!("{VALIDATOR_EXPECT_MSG}"), }; sign_ethereum_events( - &self.wl_storage, + &self.state, validator_addr, protocol_key, ethereum_events, @@ -89,7 +87,7 @@ where _ => unreachable!("{VALIDATOR_EXPECT_MSG}"), }; sign_bridge_pool_root( - &self.wl_storage, + &self.state, validator_addr, eth_hot_key, protocol_key, @@ -110,7 +108,7 @@ where .mode .get_eth_bridge_keypair() .expect("{VALIDATOR_EXPECT_MSG}"); - sign_validator_set_update(&self.wl_storage, validator_addr, eth_hot_key) + sign_validator_set_update(&self.state, validator_addr, eth_hot_key) } /// Given a slice of [`TxBytes`], return an iterator over the @@ -140,7 +138,7 @@ where .ethereum_events .iter() .any(|event| { - self.wl_storage + self.state .ethbridge_queries() .validate_eth_event_nonce(event) }) @@ -157,7 +155,7 @@ where // will eventually be evicted, getting replaced // by newer txs. 
(!self - .wl_storage + .state .ethbridge_queries() .valset_upd_seen(ext.data.signing_epoch.next())) .then(|| tx_bytes.clone()) diff --git a/crates/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs b/crates/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs index f6f9f10308..c674d82fc9 100644 --- a/crates/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs +++ b/crates/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs @@ -1,11 +1,8 @@ //! Extend Tendermint votes with signatures of the Ethereum //! bridge pool root and nonce seen by a quorum of validators. use itertools::Itertools; -use namada::state::{DBIter, StorageHasher, DB}; -use namada::tx::Signed; use super::*; -use crate::node::ledger::shell::Shell; impl Shell where @@ -29,9 +26,9 @@ where > + 'iter { vote_extensions.into_iter().map(|vote_extension| { validate_bp_roots_vext( - &self.wl_storage, + &self.state, &vote_extension, - self.wl_storage.storage.get_last_block_height(), + self.state.in_mem().get_last_block_height(), )?; Ok(vote_extension) }) @@ -56,6 +53,11 @@ where #[cfg(test)] mod test_bp_vote_extensions { + use namada::core::ethereum_events::Uint; + use namada::core::keccak::{keccak_hash, KeccakHash}; + use namada::core::key::*; + use namada::core::storage::BlockHeight; + use namada::core::token; use namada::ethereum_bridge::protocol::validation::bridge_pool_roots::validate_bp_roots_vext; use namada::ethereum_bridge::storage::bridge_pool::get_key_from_hash; use namada::ethereum_bridge::storage::eth_bridge_queries::EthBridgeQueries; @@ -70,12 +72,7 @@ mod test_bp_vote_extensions { use namada::proof_of_stake::{become_validator, BecomeValidator, Epoch}; use namada::state::StorageWrite; use namada::tendermint::abci::types::VoteInfo; - use namada::tx::{SignableEthMessage, Signed}; - use namada::types::ethereum_events::Uint; - use namada::types::keccak::{keccak_hash, KeccakHash}; - use namada::types::key::*; - use 
namada::types::storage::BlockHeight; - use namada::types::token; + use namada::tx::Signed; use namada::vote_ext::bridge_pool_roots; use crate::node::ledger::shell::test_utils::*; @@ -90,15 +87,11 @@ mod test_bp_vote_extensions { validators_handle .at(&1.into()) .at(&token::Amount::native_whole(100)) - .insert( - &mut shell.wl_storage, - ValidatorPosition(1), - bertha_address(), - ) + .insert(&mut shell.state, ValidatorPosition(1), bertha_address()) .expect("Test failed"); // change pipeline length to 1 - let mut params = shell.wl_storage.pos_queries().get_pos_params(); + let mut params = shell.state.pos_queries().get_pos_params(); params.owned.pipeline_len = 1; let consensus_key = gen_keypair(); @@ -107,7 +100,7 @@ mod test_bp_vote_extensions { let cold_key = gen_secp256k1_keypair(); become_validator( - &mut shell.wl_storage, + &mut shell.state, BecomeValidator { params: ¶ms, address: &bertha_address(), @@ -127,7 +120,7 @@ mod test_bp_vote_extensions { // we advance forward to the next epoch let consensus_set: Vec = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() @@ -136,7 +129,7 @@ mod test_bp_vote_extensions { let val1 = consensus_set[0].clone(); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.address.clone(), Epoch::default(), @@ -159,19 +152,19 @@ mod test_bp_vote_extensions { let to_sign = get_bp_bytes_to_sign(); let sig = Signed::<_, SignableEthMessage>::new(&hot_key, to_sign).sig; let vote_ext = bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: bertha_address(), sig, } .sign(&bertha_keypair()); - shell.wl_storage.storage.block.height = - shell.wl_storage.storage.get_last_block_height(); + shell.state.in_mem_mut().block.height = + shell.state.in_mem().get_last_block_height(); shell.commit(); assert!( validate_bp_roots_vext( - 
&shell.wl_storage, + &shell.state, &vote_ext.0, - shell.wl_storage.storage.get_last_block_height() + shell.state.in_mem().get_last_block_height() ) .is_ok() ); @@ -189,8 +182,8 @@ mod test_bp_vote_extensions { .get_validator_address() .expect("Test failed") .clone(); - shell.wl_storage.storage.block.height = - shell.wl_storage.storage.get_last_block_height(); + shell.state.in_mem_mut().block.height = + shell.state.in_mem().get_last_block_height(); shell.commit(); let to_sign = get_bp_bytes_to_sign(); let sig = Signed::<_, SignableEthMessage>::new( @@ -199,7 +192,7 @@ mod test_bp_vote_extensions { ) .sig; let vote_ext = bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address, sig, } @@ -210,9 +203,9 @@ mod test_bp_vote_extensions { ); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &vote_ext.0, - shell.wl_storage.storage.get_last_block_height(), + shell.state.in_mem().get_last_block_height(), ) .is_ok() ) @@ -229,8 +222,8 @@ mod test_bp_vote_extensions { .get_validator_address() .expect("Test failed") .clone(); - shell.wl_storage.storage.block.height = - shell.wl_storage.storage.get_last_block_height(); + shell.state.in_mem_mut().block.height = + shell.state.in_mem().get_last_block_height(); shell.commit(); let to_sign = get_bp_bytes_to_sign(); let sig = Signed::<_, SignableEthMessage>::new( @@ -239,7 +232,7 @@ mod test_bp_vote_extensions { ) .sig; let vote_ext = bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address, sig, } @@ -265,21 +258,21 @@ mod test_bp_vote_extensions { .get_validator_address() .expect("Test failed") .clone(); - shell.wl_storage.storage.block.height = - shell.wl_storage.storage.get_last_block_height(); + shell.state.in_mem_mut().block.height = + 
shell.state.in_mem().get_last_block_height(); shell.commit(); let to_sign = get_bp_bytes_to_sign(); let sig = Signed::<_, SignableEthMessage>::new(&signing_key, to_sign).sig; let bp_root = bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address, sig, } .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, shell.get_current_decision_height(), ) @@ -306,16 +299,16 @@ mod test_bp_vote_extensions { ) .sig; let bp_root = bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address, sig, } .sign(&bertha_keypair()); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, - shell.wl_storage.storage.get_last_block_height() + shell.state.in_mem().get_last_block_height() ) .is_err() ) @@ -338,9 +331,9 @@ mod test_bp_vote_extensions { assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, - shell.wl_storage.storage.get_last_block_height() + shell.state.in_mem().get_last_block_height() ) .is_err() ) @@ -352,7 +345,7 @@ mod test_bp_vote_extensions { fn test_block_height_too_high() { let (shell, _, _, _) = setup_at_height(3u64); reject_incorrect_block_number( - shell.wl_storage.storage.get_last_block_height() + 1, + shell.state.in_mem().get_last_block_height() + 1, &shell, ); } @@ -378,16 +371,16 @@ mod test_bp_vote_extensions { ) .sig; let bp_root = bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address, sig, } .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, - 
shell.wl_storage.storage.get_last_block_height() + shell.state.in_mem().get_last_block_height() ) .is_err() ) @@ -406,16 +399,16 @@ mod test_bp_vote_extensions { ) .sig; let bp_root = bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address, sig, } .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, - shell.wl_storage.storage.get_last_block_height() + shell.state.in_mem().get_last_block_height() ) .is_err() ) @@ -427,28 +420,28 @@ mod test_bp_vote_extensions { fn test_vext_for_old_height() { let (mut shell, _recv, _, _oracle_control_recv) = setup_at_height(1u64); let address = shell.mode.get_validator_address().unwrap().clone(); - shell.wl_storage.storage.block.height = 2.into(); + shell.state.in_mem_mut().block.height = 2.into(); let key = get_key_from_hash(&KeccakHash([1; 32])); - let height = shell.wl_storage.storage.block.height; - shell.wl_storage.write(&key, height).expect("Test failed"); + let height = shell.state.in_mem().block.height; + shell.state.write(&key, height).expect("Test failed"); shell.commit(); assert_eq!( shell - .wl_storage + .state .ethbridge_queries() .get_bridge_pool_root_at_height(2.into()) .unwrap(), KeccakHash([1; 32]) ); - shell.wl_storage.storage.block.height = 3.into(); - shell.wl_storage.delete(&key).expect("Test failed"); + shell.state.in_mem_mut().block.height = 3.into(); + shell.state.delete(&key).expect("Test failed"); let key = get_key_from_hash(&KeccakHash([2; 32])); - let height = shell.wl_storage.storage.block.height; - shell.wl_storage.write(&key, height).expect("Test failed"); + let height = shell.state.in_mem().block.height; + shell.state.write(&key, height).expect("Test failed"); shell.commit(); assert_eq!( shell - .wl_storage + .state .ethbridge_queries() .get_bridge_pool_root_at_height(3.into()) .unwrap(), @@ 
-468,7 +461,7 @@ mod test_bp_vote_extensions { .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, shell.get_current_decision_height() ) @@ -488,7 +481,7 @@ mod test_bp_vote_extensions { .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, shell.get_current_decision_height() ) @@ -502,28 +495,28 @@ mod test_bp_vote_extensions { fn test_wrong_height_for_root() { let (mut shell, _recv, _, _oracle_control_recv) = setup_at_height(1u64); let address = shell.mode.get_validator_address().unwrap().clone(); - shell.wl_storage.storage.block.height = 2.into(); + shell.state.in_mem_mut().block.height = 2.into(); let key = get_key_from_hash(&KeccakHash([1; 32])); - let height = shell.wl_storage.storage.block.height; - shell.wl_storage.write(&key, height).expect("Test failed"); + let height = shell.state.in_mem().block.height; + shell.state.write(&key, height).expect("Test failed"); shell.commit(); assert_eq!( shell - .wl_storage + .state .ethbridge_queries() .get_bridge_pool_root_at_height(2.into()) .unwrap(), KeccakHash([1; 32]) ); - shell.wl_storage.storage.block.height = 3.into(); - shell.wl_storage.delete(&key).expect("Test failed"); + shell.state.in_mem_mut().block.height = 3.into(); + shell.state.delete(&key).expect("Test failed"); let key = get_key_from_hash(&KeccakHash([2; 32])); - let height = shell.wl_storage.storage.block.height; - shell.wl_storage.write(&key, height).expect("Test failed"); + let height = shell.state.in_mem().block.height; + shell.state.write(&key, height).expect("Test failed"); shell.commit(); assert_eq!( shell - .wl_storage + .state .ethbridge_queries() .get_bridge_pool_root_at_height(3.into()) .unwrap(), @@ -543,7 +536,7 @@ mod test_bp_vote_extensions { .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_bp_roots_vext( - &shell.wl_storage, + 
&shell.state, &bp_root.0, shell.get_current_decision_height() ) diff --git a/crates/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs b/crates/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs index 8ef538bbcb..881e25e278 100644 --- a/crates/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs +++ b/crates/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs @@ -2,14 +2,9 @@ use std::collections::{BTreeMap, HashMap}; -use namada::state::{DBIter, StorageHasher, DB}; -use namada::tx::Signed; -use namada::types::ethereum_events::EthereumEvent; -use namada::vote_ext::ethereum_events::{self, MultiSignedEthEvent}; -use namada_sdk::eth_bridge::EthBridgeQueries; +use namada::vote_ext::ethereum_events::MultiSignedEthEvent; use super::*; -use crate::node::ledger::shell::{Shell, ShellMode}; impl Shell where @@ -19,7 +14,7 @@ where /// Checks the channel from the Ethereum oracle monitoring /// the fullnode and retrieves all seen Ethereum events. pub fn new_ethereum_events(&mut self) -> Vec { - let queries = self.wl_storage.ethbridge_queries(); + let queries = self.state.ethbridge_queries(); match &mut self.mode { ShellMode::Validator { eth_oracle: @@ -54,9 +49,9 @@ where > + 'iter { vote_extensions.into_iter().map(|vote_extension| { validate_eth_events_vext( - &self.wl_storage, + &self.state, &vote_extension, - self.wl_storage.storage.get_last_block_height(), + self.state.in_mem().get_last_block_height(), )?; Ok(vote_extension) }) @@ -86,7 +81,7 @@ where vote_extensions: Vec>, ) -> Option { #[allow(clippy::question_mark)] - if self.wl_storage.storage.last_block.is_none() { + if self.state.in_mem().last_block.is_none() { return None; } @@ -140,9 +135,15 @@ where #[cfg(test)] mod test_vote_extensions { - use std::convert::TryInto; use borsh_ext::BorshSerializeExt; + use namada::core::address::testing::gen_established_address; + use namada::core::ethereum_events::{ + EthAddress, EthereumEvent, TransferToEthereum, Uint, + }; + use 
namada::core::hash::Hash; + use namada::core::key::*; + use namada::core::storage::{Epoch, InnerEthEventsQueue}; use namada::eth_bridge::storage::bridge_pool; use namada::ledger::eth_bridge::EthBridgeQueries; use namada::ledger::pos::PosQueries; @@ -153,14 +154,8 @@ mod test_vote_extensions { use namada::proof_of_stake::types::WeightedValidator; use namada::state::collections::lazy_map::{NestedSubKey, SubKey}; use namada::tendermint::abci::types::VoteInfo; - use namada::types::address::testing::gen_established_address; - use namada::types::ethereum_events::{ - EthAddress, EthereumEvent, TransferToEthereum, Uint, - }; - use namada::types::hash::Hash; - use namada::types::key::*; - use namada::types::storage::{Epoch, InnerEthEventsQueue}; use namada::vote_ext::ethereum_events; + use namada_sdk::storage::StorageWrite; use super::validate_eth_events_vext; use crate::node::ledger::shell::test_utils::*; @@ -174,22 +169,24 @@ mod test_vote_extensions { // write bp nonce to storage shell - .wl_storage - .storage - .write(&bridge_pool::get_nonce_key(), nonce.serialize_to_vec()) + .state + .write_bytes( + &bridge_pool::get_nonce_key(), + nonce.serialize_to_vec(), + ) .expect("Test failed"); // write nam nonce to the eth events queue shell - .wl_storage - .storage + .state + .in_mem_mut() .eth_events_queue .transfers_to_namada = InnerEthEventsQueue::new_at(nonce); // eth transfers with the same nonce as the bp nonce in storage are // valid shell - .wl_storage + .state .ethbridge_queries() .validate_eth_event_nonce(&EthereumEvent::TransfersToEthereum { nonce, @@ -202,7 +199,7 @@ mod test_vote_extensions { // eth transfers with different nonces are invalid shell - .wl_storage + .state .ethbridge_queries() .validate_eth_event_nonce(&EthereumEvent::TransfersToEthereum { nonce: nonce + 1, @@ -213,7 +210,7 @@ mod test_vote_extensions { .ok_or(()) .expect_err("Test failed"); shell - .wl_storage + .state .ethbridge_queries() 
.validate_eth_event_nonce(&EthereumEvent::TransfersToEthereum { nonce: nonce - 1, @@ -226,7 +223,7 @@ mod test_vote_extensions { // nam transfers with nonces >= the nonce in storage are valid shell - .wl_storage + .state .ethbridge_queries() .validate_eth_event_nonce(&EthereumEvent::TransfersToNamada { nonce, @@ -236,7 +233,7 @@ mod test_vote_extensions { .ok_or(()) .expect("Test failed"); shell - .wl_storage + .state .ethbridge_queries() .validate_eth_event_nonce(&EthereumEvent::TransfersToNamada { nonce: nonce + 5, @@ -248,7 +245,7 @@ mod test_vote_extensions { // nam transfers with lower nonces are invalid shell - .wl_storage + .state .ethbridge_queries() .validate_eth_event_nonce(&EthereumEvent::TransfersToNamada { nonce: nonce - 1, @@ -258,7 +255,7 @@ mod test_vote_extensions { .ok_or(()) .expect_err("Test failed"); shell - .wl_storage + .state .ethbridge_queries() .validate_eth_event_nonce(&EthereumEvent::TransfersToNamada { nonce: nonce - 2, @@ -372,7 +369,7 @@ mod test_vote_extensions { .sign(&signing_key); assert!( validate_eth_events_vext( - &shell.wl_storage, + &shell.state, ðereum_events, shell.get_current_decision_height(), ) @@ -411,11 +408,11 @@ mod test_vote_extensions { } .sign(shell.mode.get_protocol_key().expect("Test failed")); - assert_eq!(shell.wl_storage.storage.get_current_epoch().0.0, 0); + assert_eq!(shell.state.in_mem().get_current_epoch().0.0, 0); // remove all validators of the next epoch let validators_handle = consensus_validator_set_handle().at(&1.into()); let consensus_in_mem = validators_handle - .iter(&shell.wl_storage) + .iter(&shell.state) .expect("Test failed") .map(|val| { let ( @@ -431,23 +428,23 @@ mod test_vote_extensions { for (val_stake, val_position) in consensus_in_mem.into_iter() { validators_handle .at(&val_stake) - .remove(&mut shell.wl_storage, &val_position) + .remove(&mut shell.state, &val_position) .expect("Test failed"); } // we advance forward to the next epoch let consensus_set: Vec = 
read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() .into_iter() .collect(); - let params = shell.wl_storage.pos_queries().get_pos_params(); + let params = shell.state.pos_queries().get_pos_params(); let val1 = consensus_set[0].clone(); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.address.clone(), Epoch::default(), @@ -467,17 +464,17 @@ mod test_vote_extensions { assert_eq!(shell.start_new_epoch(Some(req)).0, 1); assert!( shell - .wl_storage + .state .pos_queries() .get_validator_from_protocol_pk(&signing_key.ref_to(), None) .is_err() ); let prev_epoch = - Epoch(shell.wl_storage.storage.get_current_epoch().0.0 - 1); + Epoch(shell.state.in_mem().get_current_epoch().0.0 - 1); assert!( shell .shell - .wl_storage + .state .pos_queries() .get_validator_from_protocol_pk( &signing_key.ref_to(), @@ -487,12 +484,8 @@ mod test_vote_extensions { ); assert!( - validate_eth_events_vext( - &shell.wl_storage, - &vote_ext, - signed_height - ) - .is_ok() + validate_eth_events_vext(&shell.state, &vote_ext, signed_height) + .is_ok() ); } @@ -516,19 +509,19 @@ mod test_vote_extensions { }], relayer: gen_established_address(), }], - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address.clone(), }; ethereum_events.block_height = - shell.wl_storage.storage.get_last_block_height() + 1; + shell.state.in_mem().get_last_block_height() + 1; let signed_vext = ethereum_events .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_eth_events_vext( - &shell.wl_storage, + &shell.state, &signed_vext, - shell.wl_storage.storage.get_last_block_height() + shell.state.in_mem().get_last_block_height() ) .is_err() ) @@ -552,16 +545,16 @@ mod test_vote_extensions { }], relayer: gen_established_address(), }], - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: 
shell.state.in_mem().get_last_block_height(), validator_addr: address.clone(), } .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_eth_events_vext( - &shell.wl_storage, + &shell.state, &vote_ext, - shell.wl_storage.storage.get_last_block_height() + shell.state.in_mem().get_last_block_height() ) .is_err() ) diff --git a/crates/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs b/crates/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs index 67eb2a0a88..1eba475c22 100644 --- a/crates/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs +++ b/crates/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs @@ -3,11 +3,7 @@ use std::collections::HashMap; -use namada::state::{DBIter, StorageHasher, DB}; -use namada::vote_ext::validator_set_update; - use super::*; -use crate::node::ledger::shell::Shell; impl Shell where @@ -31,9 +27,9 @@ where > + '_ { vote_extensions.into_iter().map(|vote_extension| { validate_valset_upd_vext( - &self.wl_storage, + &self.state, &vote_extension, - self.wl_storage.storage.get_current_epoch().0, + self.state.in_mem().get_current_epoch().0, )?; Ok(vote_extension) }) @@ -60,7 +56,7 @@ where vote_extensions: Vec, ) -> Option { #[allow(clippy::question_mark)] - if self.wl_storage.storage.last_block.is_none() { + if self.state.in_mem().last_block.is_none() { return None; } @@ -113,6 +109,7 @@ where #[cfg(test)] mod test_vote_extensions { + use namada::core::key::RefTo; use namada::ledger::pos::PosQueries; use namada::proof_of_stake::storage::{ consensus_validator_set_handle, @@ -122,7 +119,6 @@ mod test_vote_extensions { use namada::proof_of_stake::Epoch; use namada::state::collections::lazy_map::{NestedSubKey, SubKey}; use namada::tendermint::abci::types::VoteInfo; - use namada::types::key::RefTo; use namada::vote_ext::validator_set_update; use namada_sdk::eth_bridge::EthBridgeQueries; @@ -142,12 +138,12 @@ mod test_vote_extensions { let eth_bridge_key = 
shell.mode.get_eth_bridge_keypair().expect("Test failed"); - let signing_epoch = shell.wl_storage.storage.get_current_epoch().0; + let signing_epoch = shell.state.in_mem().get_current_epoch().0; let next_epoch = signing_epoch.next(); let voting_powers = { shell - .wl_storage + .state .ethbridge_queries() .get_consensus_eth_addresses(Some(next_epoch)) .iter() @@ -157,19 +153,17 @@ mod test_vote_extensions { .collect() }; #[allow(clippy::redundant_clone)] - let validator_set_update = Some( - validator_set_update::Vext { - voting_powers, - validator_addr: validator_addr.clone(), - // invalid epoch - signing_epoch: next_epoch, - } - .sign(eth_bridge_key), - ); + let validator_set_update = validator_set_update::Vext { + voting_powers, + validator_addr: validator_addr.clone(), + // invalid epoch + signing_epoch: next_epoch, + } + .sign(eth_bridge_key); assert!( validate_valset_upd_vext( - &shell.wl_storage, - &validator_set_update.unwrap(), + &shell.state, + &validator_set_update, signing_epoch, ) .is_err() @@ -186,11 +180,11 @@ mod test_vote_extensions { let bertha_addr = wallet::defaults::bertha_address(); (test_utils::gen_secp256k1_keypair(), bertha_key, bertha_addr) }; - let signing_epoch = shell.wl_storage.storage.get_current_epoch().0; + let signing_epoch = shell.state.in_mem().get_current_epoch().0; let voting_powers = { let next_epoch = signing_epoch.next(); shell - .wl_storage + .state .ethbridge_queries() .get_consensus_eth_addresses(Some(next_epoch)) .iter() @@ -200,18 +194,16 @@ mod test_vote_extensions { .collect() }; #[allow(clippy::redundant_clone)] - let validator_set_update = Some( - validator_set_update::Vext { - voting_powers, - signing_epoch, - validator_addr: validator_addr.clone(), - } - .sign(ð_bridge_key), - ); + let validator_set_update = validator_set_update::Vext { + voting_powers, + signing_epoch, + validator_addr: validator_addr.clone(), + } + .sign(ð_bridge_key); assert!( validate_valset_upd_vext( - &shell.wl_storage, - 
&validator_set_update.unwrap(), + &shell.state, + &validator_set_update, signing_epoch, ) .is_err() @@ -228,13 +220,13 @@ mod test_vote_extensions { // validators from the current epoch sign over validator // set of the next epoch - let signing_epoch = shell.wl_storage.storage.get_current_epoch().0; + let signing_epoch = shell.state.in_mem().get_current_epoch().0; assert_eq!(signing_epoch.0, 0); // remove all validators of the next epoch let validators_handle = consensus_validator_set_handle().at(&1.into()); let consensus_in_mem = validators_handle - .iter(&shell.wl_storage) + .iter(&shell.state) .expect("Test failed") .map(|val| { let ( @@ -250,7 +242,7 @@ mod test_vote_extensions { for (val_stake, val_position) in consensus_in_mem.into_iter() { validators_handle .at(&val_stake) - .remove(&mut shell.wl_storage, &val_position) + .remove(&mut shell.state, &val_position) .expect("Test failed"); } @@ -270,7 +262,7 @@ mod test_vote_extensions { let voting_powers = { let next_epoch = signing_epoch.next(); shell - .wl_storage + .state .ethbridge_queries() .get_consensus_eth_addresses(Some(next_epoch)) .iter() @@ -288,10 +280,10 @@ mod test_vote_extensions { assert!(vote_ext.data.voting_powers.is_empty()); // we advance forward to the next epoch - let params = shell.wl_storage.pos_queries().get_pos_params(); + let params = shell.state.pos_queries().get_pos_params(); let mut consensus_set: Vec = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, 0.into(), ) .unwrap() @@ -300,7 +292,7 @@ mod test_vote_extensions { assert_eq!(consensus_set.len(), 1); let val1 = consensus_set.remove(0); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.address, Epoch::default(), @@ -320,16 +312,16 @@ mod test_vote_extensions { assert_eq!(shell.start_new_epoch(Some(req)).0, 1); assert!( shell - .wl_storage + .state .pos_queries() .get_validator_from_protocol_pk(&protocol_key.ref_to(), None) .is_err() ); - let prev_epoch = 
shell.wl_storage.storage.get_current_epoch().0 - 1; + let prev_epoch = shell.state.in_mem().get_current_epoch().0 - 1; assert!( shell .shell - .wl_storage + .state .pos_queries() .get_validator_from_protocol_pk( &protocol_key.ref_to(), @@ -340,12 +332,8 @@ mod test_vote_extensions { // check validation of the vext passes assert!( - validate_valset_upd_vext( - &shell.wl_storage, - &vote_ext, - signing_epoch - ) - .is_ok() + validate_valset_upd_vext(&shell.state, &vote_ext, signing_epoch) + .is_ok() ); } @@ -360,13 +348,13 @@ mod test_vote_extensions { let eth_bridge_key = shell.mode.get_eth_bridge_keypair().expect("Test failed"); - let signing_epoch = shell.wl_storage.storage.get_current_epoch().0; + let signing_epoch = shell.state.in_mem().get_current_epoch().0; #[allow(clippy::redundant_clone)] let validator_set_update = { let voting_powers = { let next_epoch = signing_epoch.next(); shell - .wl_storage + .state .ethbridge_queries() .get_consensus_eth_addresses(Some(next_epoch)) .iter() @@ -386,7 +374,7 @@ mod test_vote_extensions { }; assert!( validate_valset_upd_vext( - &shell.wl_storage, + &shell.state, &validator_set_update.unwrap(), signing_epoch, ) diff --git a/crates/apps/src/lib/node/ledger/shims/abcipp_shim.rs b/crates/apps/src/lib/node/ledger/shims/abcipp_shim.rs index 1b8b7343fd..11788fe9d1 100644 --- a/crates/apps/src/lib/node/ledger/shims/abcipp_shim.rs +++ b/crates/apps/src/lib/node/ledger/shims/abcipp_shim.rs @@ -1,18 +1,16 @@ -use std::convert::TryFrom; use std::future::Future; use std::path::PathBuf; use std::pin::Pin; use std::task::{Context, Poll}; use futures::future::FutureExt; +use namada::core::hash::Hash; +use namada::core::key::tm_raw_hash_to_string; +use namada::core::storage::{BlockHash, BlockHeight}; use namada::proof_of_stake::storage::find_validator_by_raw_hash; +use namada::time::{DateTimeUtc, Utc}; use namada::tx::data::hash_tx; use namada::tx::Tx; -use namada::types::hash::Hash; -use namada::types::key::tm_raw_hash_to_string; -use 
namada::types::storage::{BlockHash, BlockHeight}; -use namada::types::time::Utc; -use namada_sdk::types::time::DateTimeUtc; use tokio::sync::broadcast; use tokio::sync::mpsc::UnboundedSender; use tower::Service; @@ -136,7 +134,7 @@ impl AbcippShim { begin_block_request.header.proposer_address, ); let block_proposer = find_validator_by_raw_hash( - &self.service.wl_storage, + &self.service.state, tm_raw_hash_string, ) .unwrap() diff --git a/crates/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs b/crates/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs index 5b0a553f55..2038beaf19 100644 --- a/crates/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs +++ b/crates/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs @@ -1,7 +1,6 @@ use crate::facade::tendermint::v0_37::abci::{Request, Response}; pub mod shim { - use std::convert::TryFrom; use thiserror::Error; @@ -151,11 +150,10 @@ pub mod shim { /// Custom types for request payloads pub mod request { - use std::convert::TryFrom; - use namada::types::hash::Hash; - use namada::types::storage::{BlockHash, Header}; - use namada::types::time::DateTimeUtc; + use namada::core::hash::Hash; + use namada::core::storage::{BlockHash, Header}; + use namada::core::time::DateTimeUtc; use super::VoteInfo; use crate::facade::tendermint::abci::types::Misbehavior; diff --git a/crates/apps/src/lib/node/ledger/storage/mod.rs b/crates/apps/src/lib/node/ledger/storage/mod.rs index 385405fed1..7da4d37fc9 100644 --- a/crates/apps/src/lib/node/ledger/storage/mod.rs +++ b/crates/apps/src/lib/node/ledger/storage/mod.rs @@ -9,14 +9,16 @@ use arse_merkle_tree::blake2b::Blake2bHasher; use arse_merkle_tree::traits::Hasher; use arse_merkle_tree::H256; use blake2b_rs::{Blake2b, Blake2bBuilder}; -use namada::state::{State, StorageHasher}; +use namada::state::StorageHasher; +use namada_sdk::state::FullAccessState; #[derive(Default)] pub struct PersistentStorageHasher(Blake2bHasher); pub type PersistentDB = rocksdb::RocksDB; -pub type 
PersistentStorage = State; +pub type PersistentState = + FullAccessState; impl Hasher for PersistentStorageHasher { fn write_bytes(&mut self, h: &[u8]) { @@ -54,24 +56,22 @@ mod tests { use borsh::BorshDeserialize; use itertools::Itertools; + use namada::core::chain::ChainId; + use namada::core::ethereum_events::Uint; + use namada::core::hash::Hash; + use namada::core::keccak::KeccakHash; + use namada::core::storage::{BlockHash, BlockHeight, Key}; + use namada::core::time::DurationSecs; + use namada::core::{address, storage}; use namada::eth_bridge::storage::proof::BridgePoolRootProof; use namada::ledger::eth_bridge::storage::bridge_pool; use namada::ledger::gas::STORAGE_ACCESS_GAS_PER_BYTE; use namada::ledger::ibc::storage::ibc_key; use namada::ledger::parameters::{EpochDuration, Parameters}; - use namada::state::write_log::WriteLog; - use namada::state::{ - self, StorageRead, StorageWrite, StoreType, WlStorage, DB, - }; + use namada::state::{self, StorageRead, StorageWrite, StoreType, DB}; use namada::token::conversion::update_allowed_conversions; - use namada::types::chain::ChainId; - use namada::types::ethereum_events::Uint; - use namada::types::hash::Hash; - use namada::types::keccak::KeccakHash; - use namada::types::storage::{BlockHash, BlockHeight, Key}; - use namada::types::time::DurationSecs; - use namada::types::{address, storage}; - use namada::{parameters, types}; + use namada::{decode, encode, parameters}; + use namada_sdk::state::StateRead; use proptest::collection::vec; use proptest::prelude::*; use proptest::test_runner::Config; @@ -84,38 +84,37 @@ mod tests { fn test_crud_value() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), - ChainId::default(), - address::nam(), None, + ChainId::default(), + address::testing::nam(), None, is_merklized_storage_key, ); let key = Key::parse("key").expect("cannot parse the key 
string"); let value: u64 = 1; - let value_bytes = types::encode(&value); + let value_bytes = encode(&value); let value_bytes_len = value_bytes.len(); // before insertion - let (result, gas) = storage.has_key(&key).expect("has_key failed"); + let (result, gas) = state.db_has_key(&key).expect("has_key failed"); assert!(!result); assert_eq!(gas, key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE); - let (result, gas) = storage.read(&key).expect("read failed"); + let (result, gas) = state.db_read(&key).expect("read failed"); assert_eq!(result, None); assert_eq!(gas, key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE); // insert - storage.write(&key, value_bytes).expect("write failed"); + state.db_write(&key, value_bytes).expect("write failed"); // read - let (result, gas) = storage.has_key(&key).expect("has_key failed"); + let (result, gas) = state.db_has_key(&key).expect("has_key failed"); assert!(result); assert_eq!(gas, key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE); - let (result, gas) = storage.read(&key).expect("read failed"); - let read_value: u64 = - types::decode(result.expect("value doesn't exist")) - .expect("decoding failed"); + let (result, gas) = state.db_read(&key).expect("read failed"); + let read_value: u64 = decode(result.expect("value doesn't exist")) + .expect("decoding failed"); assert_eq!(read_value, value); assert_eq!( gas, @@ -124,12 +123,12 @@ mod tests { ); // delete - storage.delete(&key).expect("delete failed"); + state.db_delete(&key).expect("delete failed"); // read again - let (result, _) = storage.has_key(&key).expect("has_key failed"); + let (result, _) = state.db_has_key(&key).expect("has_key failed"); assert!(!result); - let (result, _) = storage.read(&key).expect("read failed"); + let (result, _) = state.db_read(&key).expect("read failed"); assert_eq!(result, None); } @@ -137,21 +136,21 @@ mod tests { fn test_commit_block() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = 
PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), - ChainId::default(), - address::nam(), None, + ChainId::default(), + address::testing::nam(), None, is_merklized_storage_key, ); - storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(100)) .expect("begin_block failed"); let key = Key::parse("key").expect("cannot parse the key string"); let value: u64 = 1; - let value_bytes = types::encode(&value); - let mut wl_storage = WlStorage::new(WriteLog::default(), storage); + let value_bytes = encode(&value); // initialize parameter storage let params = Parameters { max_tx_bytes: 1024 * 1024, @@ -173,50 +172,45 @@ mod tests { fee_unshielding_descriptions_limit: 0, minimum_gas_price: Default::default(), }; - parameters::init_storage(¶ms, &mut wl_storage) - .expect("Test failed"); + parameters::init_storage(¶ms, &mut state).expect("Test failed"); // insert and commit - wl_storage - .storage - .write(&key, value_bytes.clone()) - .expect("write failed"); - wl_storage.storage.block.epoch = wl_storage.storage.block.epoch.next(); - wl_storage - .storage + state.db_write(&key, &value_bytes).expect("write failed"); + state.in_mem_mut().block.epoch = state.in_mem().block.epoch.next(); + state + .in_mem_mut() .block .pred_epochs .new_epoch(BlockHeight(100)); - // make wl_storage to update conversion for a new epoch - update_allowed_conversions(&mut wl_storage) + // update conversion for a new epoch + update_allowed_conversions(&mut state) .expect("update conversions failed"); - wl_storage.commit_block().expect("commit failed"); + state.commit_block().expect("commit failed"); // save the last state and the storage - let root = wl_storage.storage.merkle_root().0; - let hash = wl_storage.storage.get_block_hash().0; - let address_gen = wl_storage.storage.address_gen.clone(); - drop(wl_storage); + let root = state.in_mem().merkle_root().0; + let hash = state.in_mem().get_block_hash().0; + let address_gen = 
state.in_mem().address_gen.clone(); + + // Release DB lock + drop(state); - // load the last state - let mut storage = PersistentStorage::open( + // Load the last state + let state = PersistentState::open( db_path.path(), - ChainId::default(), - address::nam(), None, + ChainId::default(), + address::testing::nam(), None, is_merklized_storage_key, ); - storage - .load_last_state() - .expect("loading the last state failed"); let (loaded_root, height) = - storage.get_state().expect("no block exists"); + state.in_mem().get_state().expect("no block exists"); assert_eq!(loaded_root.0, root); assert_eq!(height, 100); - assert_eq!(storage.get_block_hash().0, hash); - assert_eq!(storage.address_gen, address_gen); - let (val, _) = storage.read(&key).expect("read failed"); + assert_eq!(state.in_mem().get_block_hash().0, hash); + assert_eq!(state.in_mem().address_gen, address_gen); + let (val, _) = state.db_read(&key).expect("read failed"); assert_eq!(val.expect("no value"), value_bytes); } @@ -224,17 +218,14 @@ mod tests { fn test_iter() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), - ChainId::default(), - address::nam(), None, + ChainId::default(), + address::testing::nam(), None, is_merklized_storage_key, ); - storage - .begin_block(BlockHash::default(), BlockHeight(100)) - .expect("begin_block failed"); let mut expected = Vec::new(); let prefix = Key::parse("prefix").expect("cannot parse the key string"); @@ -242,17 +233,17 @@ mod tests { let key = prefix .push(&format!("{}", i)) .expect("cannot push the key segment"); - let value_bytes = types::encode(&(i as u64)); + let value_bytes = encode(&(i as u64)); // insert - storage - .write(&key, value_bytes.clone()) + state + .db_write(&key, value_bytes.clone()) .expect("write failed"); expected.push((key.to_string(), value_bytes)); } - let batch = PersistentStorage::batch(); - 
storage.commit_block(batch).expect("commit failed"); - let (iter, gas) = storage.iter_prefix(&prefix); + state.commit_block().expect("commit failed"); + + let (iter, gas) = state.db_iter_prefix(&prefix); assert_eq!(gas, (prefix.len() as u64) * STORAGE_ACCESS_GAS_PER_BYTE); for (k, v, gas) in iter { match expected.pop() { @@ -271,34 +262,38 @@ mod tests { fn test_validity_predicate() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), - ChainId::default(), - address::nam(), None, + ChainId::default(), + address::testing::nam(), None, is_merklized_storage_key, ); - storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(100)) .expect("begin_block failed"); - let addr = storage.address_gen.generate_address("test".as_bytes()); + let addr = state + .in_mem_mut() + .address_gen + .generate_address("test".as_bytes()); let key = Key::validity_predicate(&addr); // not exist let (vp, gas) = - storage.validity_predicate(&addr).expect("VP load failed"); + state.validity_predicate(&addr).expect("VP load failed"); assert_eq!(vp, None); assert_eq!(gas, (key.len() as u64) * STORAGE_ACCESS_GAS_PER_BYTE); // insert let vp1 = Hash::sha256("vp1".as_bytes()); - storage.write(&key, vp1).expect("write failed"); + state.db_write(&key, vp1).expect("write failed"); // check let (vp_code_hash, gas) = - storage.validity_predicate(&addr).expect("VP load failed"); + state.validity_predicate(&addr).expect("VP load failed"); assert_eq!(vp_code_hash.expect("no VP"), vp1); assert_eq!( gas, @@ -339,11 +334,11 @@ mod tests { ) -> namada::state::Result<()> { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), - ChainId::default(), - address::nam(), None, + ChainId::default(), + address::testing::nam(), None, 
is_merklized_storage_key, ); @@ -366,36 +361,37 @@ mod tests { let key = Key::parse("key").expect("cannot parse the key string"); for (height, write_value) in blocks_write_value.clone() { let hash = BlockHash::default(); - storage.begin_block(hash, height)?; + state.in_mem_mut().begin_block(hash, height)?; assert_eq!( - height, storage.block.height, + height, + state.in_mem().block.height, "sanity check - height is as expected" ); if write_value { - let value_bytes = types::encode(&storage.block.height); - storage.write(&key, value_bytes)?; + let value_bytes = encode(&state.in_mem().block.height); + state.db_write(&key, value_bytes)?; } else { - storage.delete(&key)?; + state.db_delete(&key)?; } - let batch = PersistentStorage::batch(); - storage.commit_block(batch)?; + + state.commit_block()?; } // 2. We try to read from these heights to check that we get back // expected value if was written at that block height or // `None` if it was deleted. for (height, write_value) in blocks_write_value.clone() { - let (value_bytes, _gas) = storage.read_with_height(&key, height)?; + let (value_bytes, _gas) = + state.db_read_with_height(&key, height)?; if write_value { let value_bytes = value_bytes.unwrap_or_else(|| { panic!("Couldn't read from height {height}") }); - let value: BlockHeight = types::decode(value_bytes).unwrap(); + let value: BlockHeight = decode(value_bytes).unwrap(); assert_eq!(value, height); } else if value_bytes.is_some() { - let value: BlockHeight = - types::decode(value_bytes.unwrap()).unwrap(); + let value: BlockHeight = decode(value_bytes.unwrap()).unwrap(); panic!("Expected no value at height {height}, got {}", value,); } } @@ -407,19 +403,19 @@ mod tests { let is_last_write = blocks_write_value.last().unwrap().1; // The upper bound is arbitrary. 
- for height in storage.get_last_block_height().0 - ..storage.get_last_block_height().0 + 10 + for height in state.in_mem().get_last_block_height().0 + ..state.in_mem().get_last_block_height().0 + 10 { let height = BlockHeight::from(height); - let (value_bytes, _gas) = storage.read_with_height(&key, height)?; + let (value_bytes, _gas) = + state.db_read_with_height(&key, height)?; if is_last_write { let value_bytes = value_bytes.expect("Should have been written"); - let value: BlockHeight = types::decode(value_bytes).unwrap(); - assert_eq!(value, storage.get_last_block_height()); + let value: BlockHeight = decode(value_bytes).unwrap(); + assert_eq!(value, state.in_mem().get_last_block_height()); } else if value_bytes.is_some() { - let value: BlockHeight = - types::decode(value_bytes.unwrap()).unwrap(); + let value: BlockHeight = decode(value_bytes.unwrap()).unwrap(); panic!("Expected no value at height {height}, got {}", value,); } } @@ -433,11 +429,11 @@ mod tests { ) -> namada::state::Result<()> { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), - ChainId::default(), - address::nam(), None, + ChainId::default(), + address::testing::nam(), None, is_merklized_storage_key, ); @@ -458,54 +454,59 @@ mod tests { // write values at Height 0 like init_storage for i in 0..num_keys { let key = ibc_key(format!("key{}", i)).unwrap(); - let value_bytes = types::encode(&storage.block.height); - storage.write(&key, value_bytes)?; + let value_bytes = encode(&state.in_mem().block.height); + state.db_write(&key, value_bytes)?; } let key = bridge_pool::get_signed_root_key(); let root_proof = BridgePoolRootProof::new((KeccakHash::default(), Uint::default())); - let bytes = types::encode(&root_proof); - storage.write(&key, bytes)?; + let bytes = encode(&root_proof); + state.db_write(&key, bytes)?; // Update and commit let hash = BlockHash::default(); 
let height = BlockHeight(1); - storage.begin_block(hash, height)?; + state.in_mem_mut().begin_block(hash, height)?; // Epoch 0 - storage.block.pred_epochs.new_epoch(height); - let mut batch = PersistentStorage::batch(); + state.in_mem_mut().block.pred_epochs.new_epoch(height); + let mut batch = PersistentState::batch(); for (height, key, write_type) in blocks_write_type.clone() { - if height != storage.block.height { + if height != state.in_mem().block.height { // to check the root later - roots.insert(storage.block.height, storage.merkle_root()); - if storage.block.height.0 % 5 == 0 { + roots.insert( + state.in_mem().block.height, + state.in_mem().merkle_root(), + ); + if state.in_mem().block.height.0 % 5 == 0 { // new epoch every 5 heights - storage.block.epoch = storage.block.epoch.next(); - storage.block.pred_epochs.new_epoch(storage.block.height); + state.in_mem_mut().block.epoch = + state.in_mem().block.epoch.next(); + let height = state.in_mem().block.height; + state.in_mem_mut().block.pred_epochs.new_epoch(height); } - storage.commit_block(batch)?; + state.commit_block_from_batch(batch)?; let hash = BlockHash::default(); - storage - .begin_block(hash, storage.block.height.next_height())?; - batch = PersistentStorage::batch(); + let next_height = state.in_mem().block.height.next_height(); + state.in_mem_mut().begin_block(hash, next_height)?; + batch = PersistentState::batch(); } match write_type { 0 => { // no update } 1 => { - storage.delete(&key)?; + state.db_delete(&key)?; } 2 => { - let value_bytes = types::encode(&storage.block.height); - storage.write(&key, value_bytes)?; + let value_bytes = encode(&state.in_mem().block.height); + state.db_write(&key, value_bytes)?; } 3 => { - storage.batch_delete_subspace_val(&mut batch, &key)?; + state.batch_delete_subspace_val(&mut batch, &key)?; } _ => { - let value_bytes = types::encode(&storage.block.height); - storage.batch_write_subspace_val( + let value_bytes = encode(&state.in_mem().block.height); + 
state.batch_write_subspace_val( &mut batch, &key, value_bytes, @@ -513,8 +514,8 @@ mod tests { } } } - roots.insert(storage.block.height, storage.merkle_root()); - storage.commit_block(batch)?; + roots.insert(state.in_mem().block.height, state.in_mem().merkle_root()); + state.commit_block_from_batch(batch)?; let mut current_state = HashMap::new(); for i in 0..num_keys { @@ -523,7 +524,7 @@ mod tests { } // Check a Merkle tree for (height, key, write_type) in blocks_write_type { - let tree = storage.get_merkle_tree(height, Some(StoreType::Ibc))?; + let tree = state.get_merkle_tree(height, Some(StoreType::Ibc))?; assert_eq!(tree.root().0, roots.get(&height).unwrap().0); match write_type { 0 => { @@ -552,11 +553,11 @@ mod tests { fn test_prune_merkle_tree_stores() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), - ChainId::default(), - address::nam(), None, + ChainId::default(), + address::testing::nam(), Some(5), is_merklized_storage_key, ); @@ -565,91 +566,107 @@ mod tests { // the first nonce isn't written for a test skipping pruning let nonce = Uint::default(); - storage + state + .in_mem_mut() .begin_block(BlockHash::default(), new_epoch_start) .expect("begin_block failed"); let key = ibc_key("key").unwrap(); let value: u64 = 1; - storage - .write(&key, types::encode(&value)) - .expect("write failed"); + state.db_write(&key, encode(&value)).expect("write failed"); + + state + .in_mem_mut() + .block + .pred_epochs + .new_epoch(new_epoch_start); - storage.block.pred_epochs.new_epoch(new_epoch_start); - let batch = PersistentStorage::batch(); - storage.commit_block(batch).expect("commit failed"); + state.commit_block().expect("commit failed"); let new_epoch_start = BlockHeight(6); - storage + state + .in_mem_mut() .begin_block(BlockHash::default(), new_epoch_start) .expect("begin_block failed"); let key = ibc_key("key2").unwrap(); 
let value: u64 = 2; - storage - .write(&key, types::encode(&value)) - .expect("write failed"); + state.db_write(&key, encode(&value)).expect("write failed"); // the second nonce isn't written for a test skipping pruning let nonce = nonce + 1; - storage.block.epoch = storage.block.epoch.next(); - storage.block.pred_epochs.new_epoch(new_epoch_start); - let batch = PersistentStorage::batch(); - storage.commit_block(batch).expect("commit failed"); + state.in_mem_mut().block.epoch = state.in_mem().block.epoch.next(); + state + .in_mem_mut() + .block + .pred_epochs + .new_epoch(new_epoch_start); + + state.commit_block().expect("commit failed"); - let result = storage.get_merkle_tree(1.into(), Some(StoreType::Ibc)); + let result = state.get_merkle_tree(1.into(), Some(StoreType::Ibc)); assert!(result.is_ok(), "The tree at Height 1 should be restored"); let new_epoch_start = BlockHeight(11); - storage + state + .in_mem_mut() .begin_block(BlockHash::default(), new_epoch_start) .expect("begin_block failed"); let nonce = nonce + 1; let root_proof = BridgePoolRootProof::new((KeccakHash::default(), nonce)); - let bytes = types::encode(&root_proof); - storage.write(&signed_root_key, bytes).unwrap(); + let bytes = encode(&root_proof); + state.db_write(&signed_root_key, bytes).unwrap(); + + state.in_mem_mut().block.epoch = state.in_mem().block.epoch.next(); + state + .in_mem_mut() + .block + .pred_epochs + .new_epoch(new_epoch_start); - storage.block.epoch = storage.block.epoch.next(); - storage.block.pred_epochs.new_epoch(new_epoch_start); - let batch = PersistentStorage::batch(); - storage.commit_block(batch).expect("commit failed"); + state.commit_block().expect("commit failed"); - let result = storage.get_merkle_tree(1.into(), Some(StoreType::Ibc)); + let result = state.get_merkle_tree(1.into(), Some(StoreType::Ibc)); assert!(result.is_err(), "The tree at Height 1 should be pruned"); - let result = storage.get_merkle_tree(5.into(), Some(StoreType::Ibc)); + let result = 
state.get_merkle_tree(5.into(), Some(StoreType::Ibc)); assert!( result.is_err(), "The tree at Height 5 shouldn't be able to be restored" ); - let result = storage.get_merkle_tree(6.into(), Some(StoreType::Ibc)); + let result = state.get_merkle_tree(6.into(), Some(StoreType::Ibc)); assert!(result.is_ok(), "The ibc tree should be restored"); let result = - storage.get_merkle_tree(6.into(), Some(StoreType::BridgePool)); + state.get_merkle_tree(6.into(), Some(StoreType::BridgePool)); assert!(result.is_ok(), "The bridge pool tree should be restored"); - storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(12)) .expect("begin_block failed"); let nonce = nonce + 1; let root_proof = BridgePoolRootProof::new((KeccakHash::default(), nonce)); - let bytes = types::encode(&root_proof); - storage.write(&signed_root_key, bytes).unwrap(); - storage.block.epoch = storage.block.epoch.next(); - storage.block.pred_epochs.new_epoch(BlockHeight(12)); - let batch = PersistentStorage::batch(); - storage.commit_block(batch).expect("commit failed"); + let bytes = encode(&root_proof); + state.db_write(&signed_root_key, bytes).unwrap(); + state.in_mem_mut().block.epoch = state.in_mem().block.epoch.next(); + state + .in_mem_mut() + .block + .pred_epochs + .new_epoch(BlockHeight(12)); + + state.commit_block().expect("commit failed"); // ibc tree should be able to be restored - let result = storage.get_merkle_tree(6.into(), Some(StoreType::Ibc)); + let result = state.get_merkle_tree(6.into(), Some(StoreType::Ibc)); assert!(result.is_ok(), "The ibc tree should be restored"); // bridge pool tree should be pruned because of the nonce let result = - storage.get_merkle_tree(6.into(), Some(StoreType::BridgePool)); + state.get_merkle_tree(6.into(), Some(StoreType::BridgePool)); assert!(result.is_err(), "The bridge pool tree should be pruned"); } @@ -658,18 +675,14 @@ mod tests { fn test_persistent_storage_prefix_iter() { let db_path = TempDir::new().expect("Unable to create a 
temporary DB directory"); - let storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), - ChainId::default(), - address::nam(), None, + ChainId::default(), + address::testing::nam(), None, is_merklized_storage_key, ); - let mut storage = WlStorage { - storage, - write_log: Default::default(), - }; let prefix = storage::Key::parse("prefix").unwrap(); let mismatched_prefix = storage::Key::parse("different").unwrap(); @@ -678,14 +691,14 @@ mod tests { for i in sub_keys.iter() { let key = prefix.push(i).unwrap(); - storage.write(&key, i).unwrap(); + state.write(&key, i).unwrap(); let key = mismatched_prefix.push(i).unwrap(); - storage.write(&key, i / 2).unwrap(); + state.write(&key, i / 2).unwrap(); } // Then try to iterate over their prefix - let iter = state::iter_prefix(&storage, &prefix) + let iter = state::iter_prefix(&state, &prefix) .unwrap() .map(Result::unwrap); @@ -697,10 +710,10 @@ mod tests { itertools::assert_equal(iter, expected.clone()); // Commit genesis state - storage.commit_block().unwrap(); + state.commit_block().unwrap(); // Again, try to iterate over their prefix - let iter = state::iter_prefix(&storage, &prefix) + let iter = state::iter_prefix(&state, &prefix) .unwrap() .map(Result::unwrap); itertools::assert_equal(iter, expected); @@ -712,13 +725,13 @@ mod tests { ); for i in more_sub_keys.iter() { let key = prefix.push(i).unwrap(); - storage.write(&key, i).unwrap(); + state.write(&key, i).unwrap(); let key = mismatched_prefix.push(i).unwrap(); - storage.write(&key, i / 2).unwrap(); + state.write(&key, i / 2).unwrap(); } - let iter = state::iter_prefix(&storage, &prefix) + let iter = state::iter_prefix(&state, &prefix) .unwrap() .map(Result::unwrap); @@ -734,11 +747,11 @@ mod tests { let delete_keys = [2, 0, -10, 123]; for i in delete_keys.iter() { let key = prefix.push(i).unwrap(); - storage.delete(&key).unwrap() + state.delete(&key).unwrap() } // Check that iter_prefix doesn't return deleted keys anymore - 
let iter = state::iter_prefix(&storage, &prefix) + let iter = state::iter_prefix(&state, &prefix) .unwrap() .map(Result::unwrap); let expected = merged @@ -748,10 +761,10 @@ mod tests { itertools::assert_equal(iter, expected.clone()); // Commit genesis state - storage.commit_block().unwrap(); + state.commit_block().unwrap(); // And check again - let iter = state::iter_prefix(&storage, &prefix) + let iter = state::iter_prefix(&state, &prefix) .unwrap() .map(Result::unwrap); itertools::assert_equal(iter, expected); @@ -773,21 +786,17 @@ mod tests { fn test_persistent_storage_writing_without_merklizing_or_diffs() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), - ChainId::default(), - address::nam(), None, + ChainId::default(), + address::testing::nam(), None, merkle_tree_key_filter, ); - let mut wls = WlStorage { - storage, - write_log: Default::default(), - }; // Start the first block let first_height = BlockHeight::first(); - wls.storage.block.height = first_height; + state.in_mem_mut().block.height = first_height; let key1 = test_key_1(); let val1 = 1u64; @@ -795,61 +804,60 @@ mod tests { let val2 = 2u64; // Standard write of key-val-1 - wls.write(&key1, val1).unwrap(); + state.write(&key1, val1).unwrap(); - // Read from WlStorage should return val1 - let res = wls.read::(&key1).unwrap().unwrap(); + // Read from TestState should return val1 + let res = state.read::(&key1).unwrap().unwrap(); assert_eq!(res, val1); // Read from Storage shouldn't return val1 because the block hasn't been // committed - let (res, _) = wls.storage.read(&key1).unwrap(); + let (res, _) = state.db_read(&key1).unwrap(); assert!(res.is_none()); // Write key-val-2 without merklizing or diffs - wls.write(&key2, val2).unwrap(); + state.write(&key2, val2).unwrap(); - // Read from WlStorage should return val2 - let res = wls.read::(&key2).unwrap().unwrap(); + // 
Read from TestState should return val2 + let res = state.read::(&key2).unwrap().unwrap(); assert_eq!(res, val2); // Commit block and storage changes - wls.commit_block().unwrap(); - wls.storage.block.height = wls.storage.block.height.next_height(); - let second_height = wls.storage.block.height; + state.commit_block().unwrap(); + state.in_mem_mut().block.height = + state.in_mem_mut().block.height.next_height(); + let second_height = state.in_mem().block.height; // Read key1 from Storage should return val1 - let (res1, _) = wls.storage.read(&key1).unwrap(); + let (res1, _) = state.db_read(&key1).unwrap(); let res1 = u64::try_from_slice(&res1.unwrap()).unwrap(); assert_eq!(res1, val1); // Check merkle tree inclusion of key-val-1 explicitly - let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); + let is_merklized1 = state.in_mem().block.tree.has_key(&key1).unwrap(); assert!(is_merklized1); // Key2 should be in storage. Confirm by reading from - // WlStorage and also by reading Storage subspace directly - let res2 = wls.read::(&key2).unwrap().unwrap(); + // TestState and also by reading Storage subspace directly + let res2 = state.read::(&key2).unwrap().unwrap(); assert_eq!(res2, val2); - let res2 = wls.storage.db.read_subspace_val(&key2).unwrap().unwrap(); + let res2 = state.db().read_subspace_val(&key2).unwrap().unwrap(); let res2 = u64::try_from_slice(&res2).unwrap(); assert_eq!(res2, val2); // Check explicitly that key-val-2 is not in merkle tree - let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + let is_merklized2 = state.in_mem().block.tree.has_key(&key2).unwrap(); assert!(!is_merklized2); // Check that the proper diffs exist for key-val-1 - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, first_height, true) .unwrap(); assert!(res1.is_none()); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, first_height, false) .unwrap() .unwrap(); @@ -858,15 +866,13 @@ mod 
tests { // Check that there are diffs for key-val-2 in block 0, since all keys // need to have diffs for at least 1 block for rollback purposes - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, first_height, true) .unwrap(); assert!(res2.is_none()); - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, first_height, false) .unwrap() .unwrap(); @@ -874,84 +880,77 @@ mod tests { assert_eq!(res2, val2); // Delete the data then commit the block - wls.delete(&key1).unwrap(); - wls.delete(&key2).unwrap(); - wls.commit_block().unwrap(); - wls.storage.block.height = wls.storage.block.height.next_height(); + state.delete(&key1).unwrap(); + state.delete(&key2).unwrap(); + state.commit_block().unwrap(); + state.in_mem_mut().block.height = + state.in_mem().block.height.next_height(); // Check the key-vals are removed from the storage subspace - let res1 = wls.read::(&key1).unwrap(); - let res2 = wls.read::(&key2).unwrap(); + let res1 = state.read::(&key1).unwrap(); + let res2 = state.read::(&key2).unwrap(); assert!(res1.is_none() && res2.is_none()); - let res1 = wls.storage.db.read_subspace_val(&key1).unwrap(); - let res2 = wls.storage.db.read_subspace_val(&key2).unwrap(); + let res1 = state.db().read_subspace_val(&key1).unwrap(); + let res2 = state.db().read_subspace_val(&key2).unwrap(); assert!(res1.is_none() && res2.is_none()); // Check that the key-vals don't exist in the merkle tree anymore - let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); - let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + let is_merklized1 = state.in_mem().block.tree.has_key(&key1).unwrap(); + let is_merklized2 = state.in_mem().block.tree.has_key(&key2).unwrap(); assert!(!is_merklized1 && !is_merklized2); // Check that key-val-1 diffs are properly updated for blocks 0 and 1 - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, first_height, true) .unwrap(); 
assert!(res1.is_none()); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, first_height, false) .unwrap() .unwrap(); let res1 = u64::try_from_slice(&res1).unwrap(); assert_eq!(res1, val1); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, second_height, true) .unwrap() .unwrap(); let res1 = u64::try_from_slice(&res1).unwrap(); assert_eq!(res1, val1); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, second_height, false) .unwrap(); assert!(res1.is_none()); // Check that key-val-2 diffs don't exist for block 0 anymore - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, first_height, true) .unwrap(); assert!(res2.is_none()); - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, first_height, false) .unwrap(); assert!(res2.is_none()); // Check that the block 1 diffs for key-val-2 include an "old" value of // val2 and no "new" value - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, second_height, true) .unwrap() .unwrap(); let res2 = u64::try_from_slice(&res2).unwrap(); assert_eq!(res2, val2); - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, second_height, false) .unwrap(); assert!(res2.is_none()); diff --git a/crates/apps/src/lib/node/ledger/storage/rocksdb.rs b/crates/apps/src/lib/node/ledger/storage/rocksdb.rs index c708605ad7..18e2fa13f6 100644 --- a/crates/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/crates/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -48,24 +48,22 @@ use borsh::BorshDeserialize; use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; use itertools::Either; +use namada::core::storage::{ + BlockHeight, BlockResults, Epoch, EthEventsQueue, Header, Key, KeySeg, + KEY_SEGMENT_SEPARATOR, +}; +use namada::core::time::DateTimeUtc; +use namada::core::{decode, encode, ethereum_events, ethereum_structs}; use 
namada::eth_bridge::storage::proof::BridgePoolRootProof; use namada::ledger::eth_bridge::storage::bridge_pool; -use namada::ledger::replay_protection; use namada::ledger::storage::tx_queue::TxQueue; +use namada::replay_protection; use namada::state::merkle_tree::{base_tree_key_prefix, subtree_key_prefix}; -use namada::state::types::PrefixIterator; use namada::state::{ BlockStateRead, BlockStateWrite, DBIter, DBWriteBatch, DbError as Error, - DbResult as Result, MerkleTreeStoresRead, StoreType, DB, -}; -use namada::types; -use namada::types::storage::{ - BlockHeight, BlockResults, Epoch, EthEventsQueue, Header, Key, KeySeg, - KEY_SEGMENT_SEPARATOR, + DbResult as Result, MerkleTreeStoresRead, PrefixIterator, StoreType, DB, }; -use namada::types::time::DateTimeUtc; -use namada::types::token::ConversionState; -use namada::types::{ethereum_events, ethereum_structs}; +use namada::token::ConversionState; use rayon::prelude::*; use rocksdb::{ BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, DBCompactionStyle, @@ -339,7 +337,7 @@ impl RocksDB { .get_column_family(STATE_CF) .expect("State column family should exist"); - let last_height: BlockHeight = types::decode( + let last_height: BlockHeight = decode( self.0 .get_cf(state_cf, "height") .expect("Unable to read DB") @@ -513,7 +511,7 @@ impl RocksDB { // three keys in storage we can only perform one rollback before // restarting the chain tracing::info!("Reverting non-height-prepended metadata keys"); - batch.put_cf(state_cf, "height", types::encode(&previous_height)); + batch.put_cf(state_cf, "height", encode(&previous_height)); for metadata_key in [ "next_epoch_min_start_height", "next_epoch_min_start_time", @@ -665,7 +663,7 @@ impl DB for RocksDB { Some(bytes) => { // TODO if there's an issue decoding this height, should we try // load its predecessor instead? - types::decode(bytes).map_err(Error::CodingError)? + decode(bytes).map_err(Error::CodingError)? 
} None => return Ok(None), }; @@ -678,7 +676,7 @@ impl DB for RocksDB { .get_cf(block_cf, results_path) .map_err(|e| Error::DBError(e.into_string()))? { - Some(bytes) => types::decode(bytes).map_err(Error::CodingError)?, + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => return Ok(None), }; @@ -688,7 +686,7 @@ impl DB for RocksDB { .get_cf(state_cf, "next_epoch_min_start_height") .map_err(|e| Error::DBError(e.into_string()))? { - Some(bytes) => types::decode(bytes).map_err(Error::CodingError)?, + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => { tracing::error!( "Couldn't load next epoch start height from the DB" @@ -701,7 +699,7 @@ impl DB for RocksDB { .get_cf(state_cf, "next_epoch_min_start_time") .map_err(|e| Error::DBError(e.into_string()))? { - Some(bytes) => types::decode(bytes).map_err(Error::CodingError)?, + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => { tracing::error!( "Couldn't load next epoch start time from the DB" @@ -714,7 +712,7 @@ impl DB for RocksDB { .get_cf(state_cf, "update_epoch_blocks_delay") .map_err(|e| Error::DBError(e.into_string()))? { - Some(bytes) => types::decode(bytes).map_err(Error::CodingError)?, + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => { tracing::error!( "Couldn't load epoch update block delay from the DB" @@ -727,7 +725,7 @@ impl DB for RocksDB { .get_cf(state_cf, "conversion_state") .map_err(|e| Error::DBError(e.into_string()))? { - Some(bytes) => types::decode(bytes).map_err(Error::CodingError)?, + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => { tracing::error!("Couldn't load conversion state from the DB"); return Ok(None); @@ -738,7 +736,7 @@ impl DB for RocksDB { .get_cf(state_cf, "tx_queue") .map_err(|e| Error::DBError(e.into_string()))? 
{ - Some(bytes) => types::decode(bytes).map_err(Error::CodingError)?, + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => { tracing::error!("Couldn't load tx queue from the DB"); return Ok(None); @@ -750,7 +748,7 @@ impl DB for RocksDB { .get_cf(state_cf, "ethereum_height") .map_err(|e| Error::DBError(e.into_string()))? { - Some(bytes) => types::decode(bytes).map_err(Error::CodingError)?, + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => { tracing::error!("Couldn't load ethereum height from the DB"); return Ok(None); @@ -762,7 +760,7 @@ impl DB for RocksDB { .get_cf(state_cf, "eth_events_queue") .map_err(|e| Error::DBError(e.into_string()))? { - Some(bytes) => types::decode(bytes).map_err(Error::CodingError)?, + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => { tracing::error!( "Couldn't load the eth events queue from the DB" @@ -811,7 +809,7 @@ impl DB for RocksDB { match segments.get(3) { Some(&"root") => merkle_tree_stores.set_root( &st, - types::decode(bytes) + decode(bytes) .map_err(Error::CodingError)?, ), Some(&"store") => merkle_tree_stores @@ -825,29 +823,21 @@ impl DB for RocksDB { // the block header doesn't have to be restored } "hash" => { - hash = Some( - types::decode(bytes).map_err(Error::CodingError)?, - ) + hash = Some(decode(bytes).map_err(Error::CodingError)?) } "time" => { - time = Some( - types::decode(bytes).map_err(Error::CodingError)?, - ) + time = Some(decode(bytes).map_err(Error::CodingError)?) } "epoch" => { - epoch = Some( - types::decode(bytes).map_err(Error::CodingError)?, - ) + epoch = Some(decode(bytes).map_err(Error::CodingError)?) } "pred_epochs" => { - pred_epochs = Some( - types::decode(bytes).map_err(Error::CodingError)?, - ) + pred_epochs = + Some(decode(bytes).map_err(Error::CodingError)?) 
} "address_gen" => { - address_gen = Some( - types::decode(bytes).map_err(Error::CodingError)?, - ); + address_gen = + Some(decode(bytes).map_err(Error::CodingError)?); } _ => unknown_key_error(path)?, }, @@ -867,7 +857,7 @@ impl DB for RocksDB { { merkle_tree_stores.set_root( st, - types::decode(bytes).map_err(Error::CodingError)?, + decode(bytes).map_err(Error::CodingError)?, ); } let store_key = key_prefix.with_segment("store".to_owned()); @@ -953,7 +943,7 @@ impl DB for RocksDB { batch.0.put_cf( state_cf, "next_epoch_min_start_height", - types::encode(&next_epoch_min_start_height), + encode(&next_epoch_min_start_height), ); if let Some(current_value) = self @@ -971,7 +961,7 @@ impl DB for RocksDB { batch.0.put_cf( state_cf, "next_epoch_min_start_time", - types::encode(&next_epoch_min_start_time), + encode(&next_epoch_min_start_time), ); if let Some(current_value) = self .0 @@ -988,7 +978,7 @@ impl DB for RocksDB { batch.0.put_cf( state_cf, "update_epoch_blocks_delay", - types::encode(&update_epoch_blocks_delay), + encode(&update_epoch_blocks_delay), ); // Save the conversion state when the epoch is updated @@ -1008,7 +998,7 @@ impl DB for RocksDB { batch.0.put_cf( state_cf, "conversion_state", - types::encode(conversion_state), + encode(conversion_state), ); } @@ -1021,19 +1011,13 @@ impl DB for RocksDB { // Write the predecessor value for rollback batch.0.put_cf(state_cf, "pred/tx_queue", pred_tx_queue); } + batch.0.put_cf(state_cf, "tx_queue", encode(&tx_queue)); batch .0 - .put_cf(state_cf, "tx_queue", types::encode(&tx_queue)); - batch.0.put_cf( - state_cf, - "ethereum_height", - types::encode(ðereum_height), - ); - batch.0.put_cf( - state_cf, - "eth_events_queue", - types::encode(ð_events_queue), - ); + .put_cf(state_cf, "ethereum_height", encode(ðereum_height)); + batch + .0 + .put_cf(state_cf, "eth_events_queue", encode(ð_events_queue)); let block_cf = self.get_column_family(BLOCK_CF)?; let prefix_key = Key::from(height.to_db_key()); @@ -1051,7 +1035,7 
@@ impl DB for RocksDB { batch.0.put_cf( block_cf, root_key.to_string(), - types::encode(merkle_tree_stores.root(st)), + encode(merkle_tree_stores.root(st)), ); let store_key = key_prefix.with_segment("store".to_owned()); batch.0.put_cf( @@ -1078,60 +1062,48 @@ impl DB for RocksDB { let key = prefix_key .push(&"hash".to_owned()) .map_err(Error::KeyError)?; - batch - .0 - .put_cf(block_cf, key.to_string(), types::encode(&hash)); + batch.0.put_cf(block_cf, key.to_string(), encode(&hash)); } // Block time { let key = prefix_key .push(&"time".to_owned()) .map_err(Error::KeyError)?; - batch - .0 - .put_cf(block_cf, key.to_string(), types::encode(&time)); + batch.0.put_cf(block_cf, key.to_string(), encode(&time)); } // Block epoch { let key = prefix_key .push(&"epoch".to_owned()) .map_err(Error::KeyError)?; - batch - .0 - .put_cf(block_cf, key.to_string(), types::encode(&epoch)); + batch.0.put_cf(block_cf, key.to_string(), encode(&epoch)); } // Block results { let results_path = format!("results/{}", height.raw()); - batch - .0 - .put_cf(block_cf, results_path, types::encode(&results)); + batch.0.put_cf(block_cf, results_path, encode(&results)); } // Predecessor block epochs { let key = prefix_key .push(&"pred_epochs".to_owned()) .map_err(Error::KeyError)?; - batch.0.put_cf( - block_cf, - key.to_string(), - types::encode(&pred_epochs), - ); + batch + .0 + .put_cf(block_cf, key.to_string(), encode(&pred_epochs)); } // Address gen { let key = prefix_key .push(&"address_gen".to_owned()) .map_err(Error::KeyError)?; - batch.0.put_cf( - block_cf, - key.to_string(), - types::encode(&address_gen), - ); + batch + .0 + .put_cf(block_cf, key.to_string(), encode(&address_gen)); } // Block height - batch.0.put_cf(state_cf, "height", types::encode(&height)); + batch.0.put_cf(state_cf, "height", encode(&height)); Ok(()) } @@ -1181,7 +1153,7 @@ impl DB for RocksDB { .map_err(|e| Error::DBError(e.into_string()))?; match bytes { Some(b) => { - let root = 
types::decode(b).map_err(Error::CodingError)?; + let root = decode(b).map_err(Error::CodingError)?; merkle_tree_stores.set_root(st, root); } None => return Ok(None), @@ -1204,7 +1176,7 @@ impl DB for RocksDB { fn has_replay_protection_entry( &self, - hash: &namada::types::hash::Hash, + hash: &namada::core::hash::Hash, ) -> Result { let replay_protection_cf = self.get_column_family(REPLAY_PROTECTION_CF)?; @@ -1765,8 +1737,6 @@ fn set_max_open_files(cf_opts: &mut rocksdb::Options) { #[cfg(unix)] mod imp { - use std::convert::TryInto; - use rlimit::{Resource, Rlim}; const DEFAULT_NOFILE_LIMIT: Rlim = Rlim::from_raw(16384); @@ -1814,11 +1784,11 @@ mod imp { #[cfg(test)] mod test { - use namada::state::{MerkleTree, Sha256Hasher}; - use namada::types::address::{ + use namada::core::address::{ gen_established_address, EstablishedAddressGen, }; - use namada::types::storage::{BlockHash, Epoch, Epochs}; + use namada::core::storage::{BlockHash, Epochs}; + use namada::state::{MerkleTree, Sha256Hasher}; use tempfile::tempdir; use test_log::test; @@ -1979,7 +1949,7 @@ mod test { let keys_0 = vec![key_0_a, key_0_b, key_0_c]; let keys_1 = vec![key_1_a, key_1_b, key_1_c]; let keys_01 = vec![key_01_a]; - let all_keys = vec![keys_0.clone(), keys_01, keys_1.clone()].concat(); + let all_keys = [keys_0.clone(), keys_01, keys_1.clone()].concat(); // Write the keys let mut batch = RocksDB::batch(); @@ -2121,7 +2091,7 @@ mod test { db.0.get_cf(state_cf, "conversion_state".as_bytes()) .unwrap() .unwrap(); - assert_eq!(conversion_state, types::encode(&conversion_state_0)); + assert_eq!(conversion_state, encode(&conversion_state_0)); } #[test] diff --git a/crates/apps/src/lib/node/ledger/tendermint_node.rs b/crates/apps/src/lib/node/ledger/tendermint_node.rs index 9efd96de5e..b08dce9ca6 100644 --- a/crates/apps/src/lib/node/ledger/tendermint_node.rs +++ b/crates/apps/src/lib/node/ledger/tendermint_node.rs @@ -4,10 +4,10 @@ use std::process::Stdio; use std::str::FromStr; use 
borsh_ext::BorshSerializeExt; -use namada::types::chain::ChainId; -use namada::types::key::*; -use namada::types::storage::BlockHeight; -use namada::types::time::DateTimeUtc; +use namada::core::chain::ChainId; +use namada::core::key::*; +use namada::core::storage::BlockHeight; +use namada::core::time::DateTimeUtc; use serde_json::json; use sha2::{Digest, Sha256}; use thiserror::Error; @@ -461,7 +461,7 @@ async fn write_tm_genesis( // gas is metered app-side, so we disable it // at the Tendermint level max_gas: -1, - /// This parameter has no value anymore in Tendermint-core + // This parameter has no value anymore in Tendermint-core time_iota_ms: block::Size::default_time_iota_ms(), }; genesis.consensus_params.block = size; diff --git a/crates/apps/src/lib/wallet/defaults.rs b/crates/apps/src/lib/wallet/defaults.rs index 6b53ee5545..1885c2ade7 100644 --- a/crates/apps/src/lib/wallet/defaults.rs +++ b/crates/apps/src/lib/wallet/defaults.rs @@ -13,11 +13,12 @@ mod dev { use std::collections::HashMap; use lazy_static::lazy_static; - use namada::ledger::{governance, pgf, pos}; - use namada::types::address::{ - apfel, btc, dot, eth, kartoffel, nam, schnitzel, Address, + use namada::core::address::testing::{ + apfel, btc, dot, eth, kartoffel, nam, schnitzel, }; - use namada::types::key::*; + use namada::core::address::Address; + use namada::core::key::*; + use namada::ledger::{governance, pgf, pos}; use namada_sdk::wallet::alias::Alias; use namada_sdk::wallet::pre_genesis::ValidatorWallet; use namada_sdk::wallet::Wallet; @@ -78,7 +79,7 @@ mod dev { ("christel".into(), christel_address()), ("daewon".into(), daewon_address()), ("ester".into(), ester_address()), - ("masp".into(), namada::types::address::MASP), + ("masp".into(), namada::core::address::MASP), ] .into_iter() .collect(); diff --git a/crates/apps/src/lib/wallet/mod.rs b/crates/apps/src/lib/wallet/mod.rs index 3ed2969742..b35575e25b 100644 --- a/crates/apps/src/lib/wallet/mod.rs +++ 
b/crates/apps/src/lib/wallet/mod.rs @@ -7,7 +7,7 @@ use std::path::{Path, PathBuf}; use std::{env, fs}; use namada::bip39::{Language, Mnemonic}; -use namada::types::key::*; +use namada::core::key::*; pub use namada_sdk::wallet::alias::Alias; use namada_sdk::wallet::fs::FsWalletStorage; use namada_sdk::wallet::store::Store; @@ -288,7 +288,6 @@ pub fn read_and_confirm_encryption_password( mod tests { use namada::bip39::MnemonicType; use namada_sdk::wallet::WalletIo; - use rand_core; use super::CliWalletUtils; diff --git a/crates/apps/src/lib/wallet/pre_genesis.rs b/crates/apps/src/lib/wallet/pre_genesis.rs index 12f88ed99a..a39a8358f6 100644 --- a/crates/apps/src/lib/wallet/pre_genesis.rs +++ b/crates/apps/src/lib/wallet/pre_genesis.rs @@ -3,7 +3,7 @@ use std::io::{Read, Write}; use std::path::{Path, PathBuf}; use fd_lock::RwLock; -use namada::types::key::SchemeType; +use namada::core::key::SchemeType; use namada_sdk::wallet::pre_genesis::{ ReadError, ValidatorStore, ValidatorWallet, }; diff --git a/crates/apps/src/lib/wallet/store.rs b/crates/apps/src/lib/wallet/store.rs index c721b02d9c..20021765a5 100644 --- a/crates/apps/src/lib/wallet/store.rs +++ b/crates/apps/src/lib/wallet/store.rs @@ -1,6 +1,6 @@ use std::path::{Path, PathBuf}; -use namada::types::key::*; +use namada::core::key::*; use namada_sdk::wallet::{ gen_secret_key, LoadStoreError, Store, ValidatorKeys, }; @@ -59,7 +59,7 @@ pub fn gen_validator_keys( #[cfg(test)] mod test_wallet { - use namada::types::address::Address; + use namada::core::address::Address; use super::*; diff --git a/crates/benches/README.md b/crates/benches/README.md index 9f73d2c15b..62980150ac 100644 --- a/crates/benches/README.md +++ b/crates/benches/README.md @@ -6,7 +6,7 @@ Measurements are taken on the elapsed wall-time. The benchmarks only focus on successful transactions and vps: in case of failure, the bench function shall panic to avoid timing incomplete execution paths. 
-In addition, this crate also contains benchmarks for `WrapperTx` (`namada::core::types::transaction::wrapper::WrapperTx`) validation and `host_env` (`namada::vm::host_env`) exposed functions that define the gas constants of `gas` (`namada::core::ledger::gas`). +In addition, this crate also contains benchmarks for `WrapperTx` (`namada::core::transaction::wrapper::WrapperTx`) validation and `host_env` (`namada::vm::host_env`) exposed functions that define the gas constants of `gas` (`namada::core::ledger::gas`). For more realistic results these benchmarks should be run on all the combination of supported OS/architecture. diff --git a/crates/benches/host_env.rs b/crates/benches/host_env.rs index 36a79cea05..a3955d44c4 100644 --- a/crates/benches/host_env.rs +++ b/crates/benches/host_env.rs @@ -1,8 +1,8 @@ use std::collections::{HashMap, HashSet}; use criterion::{criterion_group, criterion_main, Criterion}; -use namada::core::types::account::AccountPublicKeysMap; -use namada::core::types::address; +use namada::core::account::AccountPublicKeysMap; +use namada::core::address; use namada::ledger::storage::DB; use namada::token::{Amount, Transfer}; use namada::tx::Signature; @@ -21,7 +21,7 @@ fn tx_section_signature_validation(c: &mut Criterion) { let transfer_data = Transfer { source: defaults::albert_address(), target: defaults::bertha_address(), - token: address::nam(), + token: address::testing::nam(), amount: Amount::native_whole(500).native_denominated(), key: None, shielded: None, @@ -183,7 +183,7 @@ fn write_log_read(c: &mut Criterion) { let mut shell = BenchShell::default(); for (key, value_len) in generate_random_keys_sized() { - let key = namada::core::types::storage::Key::parse(key).unwrap(); + let key = namada::core::storage::Key::parse(key).unwrap(); // Extract the throughput, together with the wall-time, so that we can // than invert it to calculate the desired metric (time/byte) // NOTE: criterion states that the throughput is measured on the @@ -194,13 
+194,13 @@ fn write_log_read(c: &mut Criterion) { group.throughput(criterion::Throughput::Bytes(throughput_len)); // Generate random bytes for the value and write it to storage let value: Vec = (0..value_len).map(|_| rand::random()).collect(); - shell.wl_storage.write_log.write(&key, value).unwrap(); + shell.state.write_log_mut().write(&key, value).unwrap(); group.bench_function( format!("key: {key}, bytes: {throughput_len}"), |b| { b.iter_with_large_drop(|| { - shell.wl_storage.write_log.read(&key).0.unwrap() + shell.state.write_log().read(&key).0.unwrap() }) }, ); @@ -214,7 +214,7 @@ fn storage_read(c: &mut Criterion) { let mut shell = BenchShell::default(); for (key, value_len) in generate_random_keys_sized() { - let key = namada::core::types::storage::Key::parse(key).unwrap(); + let key = namada::core::storage::Key::parse(key).unwrap(); // Extract the throughput, together with the wall-time, so that we can // than invert it to calculate the desired metric (time/byte) // NOTE: criterion states that the throughput is measured on the @@ -228,19 +228,13 @@ fn storage_read(c: &mut Criterion) { // NOTE: just like for storage writes, we don't have control on when // data is actually flushed to disk, so just benchmark the read function // without caring if data is actually in memory or on disk - shell.wl_storage.storage.write(&key, &value).unwrap(); + shell.state.db_write(&key, &value).unwrap(); group.bench_function( format!("key: {key}, bytes: {throughput_len}"), |b| { b.iter_with_large_drop(|| { - shell - .wl_storage - .storage - .db - .read_subspace_val(&key) - .unwrap() - .unwrap() + shell.state.db().read_subspace_val(&key).unwrap().unwrap() }) }, ); @@ -254,7 +248,7 @@ fn write_log_write(c: &mut Criterion) { let mut shell = BenchShell::default(); for (key, value_len) in generate_random_keys_sized() { - let key = namada::core::types::storage::Key::parse(key).unwrap(); + let key = namada::core::storage::Key::parse(key).unwrap(); // Extract the throughput, together 
with the wall-time, so that we can // than invert it to calculate the desired metric (time/byte) // NOTE: criterion states that the throughput is measured on the @@ -273,7 +267,7 @@ fn write_log_write(c: &mut Criterion) { (0..value_len).map(|_| rand::random()).collect() }, |value| { - shell.wl_storage.write_log.write(&key, value).unwrap() + shell.state.write_log_mut().write(&key, value).unwrap() }, criterion::BatchSize::SmallInput, ) @@ -289,7 +283,7 @@ fn storage_write(c: &mut Criterion) { let mut shell = BenchShell::default(); for (key, value_len) in generate_random_keys_sized() { - let key = namada::core::types::storage::Key::parse(key).unwrap(); + let key = namada::core::storage::Key::parse(key).unwrap(); // Extract the throughput, together with the wall-time, so that we can // than invert it to calculate the desired metric (time/byte) // NOTE: criterion states that the throughput is measured on the @@ -298,7 +292,7 @@ fn storage_write(c: &mut Criterion) { // so we set this as the throughput parameter let throughput_len = value_len + key.len() as u64; group.throughput(criterion::Throughput::Bytes(throughput_len)); - let block_height = shell.wl_storage.storage.block.height; + let block_height = shell.state.in_mem().block.height; group.bench_function( format!("key: {key}, bytes: {throughput_len}"), @@ -315,9 +309,8 @@ fn storage_write(c: &mut Criterion) { // just benchmark the write operation here without // focusing on the hardware write shell - .wl_storage - .storage - .db + .state + .db_mut() .write_subspace_val(block_height, &key, value, true) .unwrap(); }, diff --git a/crates/benches/native_vps.rs b/crates/benches/native_vps.rs index 5f0524699b..e9c9708b50 100644 --- a/crates/benches/native_vps.rs +++ b/crates/benches/native_vps.rs @@ -5,7 +5,9 @@ use std::str::FromStr; use criterion::{criterion_group, criterion_main, Criterion}; use masp_primitives::sapling::Node; -use namada::core::types::address::{self, Address}; +use namada::core::address::{self, 
Address, InternalAddress}; +use namada::core::eth_bridge_pool::{GasFee, PendingTransfer}; +use namada::core::masp::{TransferSource, TransferTarget}; use namada::eth_bridge::storage::whitelist; use namada::governance::pgf::storage::steward::StewardDetail; use namada::governance::storage::proposal::ProposalType; @@ -44,9 +46,7 @@ use namada::sdk::masp_primitives::transaction::Transaction; use namada::state::{Epoch, StorageRead, StorageWrite, TxIndex}; use namada::token::{Amount, Transfer}; use namada::tx::{Code, Section, Tx}; -use namada::types::address::InternalAddress; -use namada::types::eth_bridge_pool::{GasFee, PendingTransfer}; -use namada::types::masp::{TransferSource, TransferTarget}; +use namada::validity_predicate::VpSentinel; use namada_apps::bench_utils::{ generate_foreign_key_tx, BenchShell, BenchShieldedCtx, ALBERT_PAYMENT_ADDRESS, ALBERT_SPENDING_KEY, BERTHA_PAYMENT_ADDRESS, @@ -107,13 +107,13 @@ fn governance(c: &mut Criterion) { let content_section = Section::ExtraData(Code::new(vec![], None)); let params = - proof_of_stake::storage::read_pos_params(&shell.wl_storage) + proof_of_stake::storage::read_pos_params(&shell.state) .unwrap(); let voting_start_epoch = Epoch(2 + params.pipeline_len + params.unbonding_len); // Must start after current epoch debug_assert_eq!( - shell.wl_storage.get_block_epoch().unwrap().next(), + shell.state.get_block_epoch().unwrap().next(), voting_start_epoch ); shell.generate_tx( @@ -138,12 +138,12 @@ fn governance(c: &mut Criterion) { let max_proposal_content_key = namada::governance::storage::keys::get_max_proposal_content_key(); let max_code_size: u64 = shell - .wl_storage + .state .read(&max_code_size_key) .expect("Error while reading from storage") .expect("Missing max_code_size parameter in storage"); let max_proposal_content_size: u64 = shell - .wl_storage + .state .read(&max_proposal_content_key) .expect("Error while reading from storage") .expect( @@ -159,13 +159,13 @@ fn governance(c: &mut Criterion) { )); let 
params = - proof_of_stake::storage::read_pos_params(&shell.wl_storage) + proof_of_stake::storage::read_pos_params(&shell.state) .unwrap(); let voting_start_epoch = Epoch(2 + params.pipeline_len + params.unbonding_len); // Must start after current epoch debug_assert_eq!( - shell.wl_storage.get_block_epoch().unwrap().next(), + shell.state.get_block_epoch().unwrap().next(), voting_start_epoch ); shell.generate_tx( @@ -193,20 +193,22 @@ fn governance(c: &mut Criterion) { shell.execute_tx(&signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let governance = GovernanceVp { ctx: Ctx::new( &Address::Internal(InternalAddress::Governance), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -266,15 +268,15 @@ fn governance(c: &mut Criterion) { // shell.execute_tx(&tx); // let (verifiers, keys_changed) = shell -// .wl_storage +// .state // .write_log // .verifiers_and_changed_keys(&BTreeSet::default()); // let slash_fund = SlashFundVp { // ctx: Ctx::new( // &Address::Internal(InternalAddress::SlashFund), -// &shell.wl_storage.storage, -// &shell.wl_storage.write_log, +// &shell.state.storage, +// &shell.state.write_log, // &tx, // &TxIndex(0), // @@ -353,7 +355,7 @@ fn ibc(c: &mut Criterion) { match bench_name { "open_connection" => { let _ = shell.init_ibc_client_state( - namada::core::types::storage::Key::from( + namada::core::storage::Key::from( Address::Internal(InternalAddress::Ibc).to_db_key(), ), ); @@ -367,20 +369,22 @@ fn ibc(c: &mut Criterion) { shell.execute_tx(signed_tx); 
let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let ibc = Ibc { ctx: Ctx::new( &Address::Internal(InternalAddress::Ibc), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -416,7 +420,7 @@ fn vp_multitoken(c: &mut Criterion) { Transfer { source: defaults::albert_address(), target: defaults::bertha_address(), - token: address::nam(), + token: address::testing::nam(), amount: Amount::native_whole(1000).native_denominated(), key: None, shielded: None, @@ -433,20 +437,22 @@ fn vp_multitoken(c: &mut Criterion) { let mut shell = BenchShell::default(); shell.execute_tx(signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let multitoken = MultitokenVp { ctx: Ctx::new( &Address::Internal(InternalAddress::Multitoken), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -503,20 +509,12 @@ fn setup_storage_for_masp_verification( // Update the anchor in storage let tree_key = namada::token::storage_key::masp_commitment_tree_key(); - let updated_tree: CommitmentTree = shielded_ctx - .shell - .wl_storage - 
.read(&tree_key) - .unwrap() - .unwrap(); + let updated_tree: CommitmentTree = + shielded_ctx.shell.state.read(&tree_key).unwrap().unwrap(); let anchor_key = namada::token::storage_key::masp_commitment_anchor_key( updated_tree.root(), ); - shielded_ctx - .shell - .wl_storage - .write(&anchor_key, ()) - .unwrap(); + shielded_ctx.shell.state.write(&anchor_key, ()).unwrap(); shielded_ctx.shell.commit_block(); let (mut shielded_ctx, signed_tx) = match bench_name { @@ -551,20 +549,22 @@ fn masp(c: &mut Criterion) { setup_storage_for_masp_verification(bench_name); let (verifiers, keys_changed) = shielded_ctx .shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let masp = MaspVp { ctx: Ctx::new( &Address::Internal(InternalAddress::Masp), - &shielded_ctx.shell.wl_storage.storage, - &shielded_ctx.shell.wl_storage.write_log, + &shielded_ctx.shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter( - &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shielded_ctx.shell.vp_wasm_cache.clone(), @@ -626,7 +626,7 @@ fn pgf(c: &mut Criterion) { let mut shell = BenchShell::default(); namada::governance::pgf::storage::keys::stewards_handle() .insert( - &mut shell.wl_storage, + &mut shell.state, defaults::albert_address(), StewardDetail::base(defaults::albert_address()), ) @@ -648,7 +648,7 @@ fn pgf(c: &mut Criterion) { steward: defaults::albert_address(), commission: HashMap::from([( defaults::albert_address(), - namada::types::dec::Dec::zero(), + namada::core::dec::Dec::zero(), )]), }; shell.generate_tx( @@ -666,20 +666,22 @@ fn pgf(c: &mut Criterion) { shell.execute_tx(&signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() 
.verifiers_and_changed_keys(&BTreeSet::default()); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let pgf = PgfVp { ctx: Ctx::new( &Address::Internal(InternalAddress::Pgf), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -705,24 +707,24 @@ fn pgf(c: &mut Criterion) { fn eth_bridge_nut(c: &mut Criterion) { let mut shell = BenchShell::default(); - let native_erc20_addres = - read_native_erc20_address(&shell.wl_storage).unwrap(); + let native_erc20_addres = read_native_erc20_address(&shell.state).unwrap(); let signed_tx = { - let data = PendingTransfer{ - transfer: namada::types::eth_bridge_pool::TransferToEthereum { - kind: namada::types::eth_bridge_pool::TransferToEthereumKind::Erc20, - asset: native_erc20_addres, - recipient: namada::types::ethereum_events::EthAddress([1u8; 20]), - sender: defaults::albert_address(), - amount: Amount::from(1), - }, - gas_fee: GasFee{ - amount: Amount::from(100), - payer: defaults::albert_address(), - token: shell.wl_storage.storage.native_token.clone(), - }, - }; + let data = PendingTransfer { + transfer: namada::core::eth_bridge_pool::TransferToEthereum { + kind: + namada::core::eth_bridge_pool::TransferToEthereumKind::Erc20, + asset: native_erc20_addres, + recipient: namada::core::ethereum_events::EthAddress([1u8; 20]), + sender: defaults::albert_address(), + amount: Amount::from(1), + }, + gas_fee: GasFee { + amount: Amount::from(100), + payer: defaults::albert_address(), + token: shell.state.in_mem().native_token.clone(), + }, + }; shell.generate_tx( TX_BRIDGE_POOL_WASM, data, @@ -736,22 +738,24 @@ fn eth_bridge_nut(c: &mut Criterion) { shell.execute_tx(&signed_tx); let 
(verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); let vp_address = Address::Internal(InternalAddress::Nut(native_erc20_addres)); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let nut = NonUsableTokens { ctx: Ctx::new( &vp_address, - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -774,24 +778,24 @@ fn eth_bridge_nut(c: &mut Criterion) { fn eth_bridge(c: &mut Criterion) { let mut shell = BenchShell::default(); - let native_erc20_addres = - read_native_erc20_address(&shell.wl_storage).unwrap(); + let native_erc20_addres = read_native_erc20_address(&shell.state).unwrap(); let signed_tx = { - let data = PendingTransfer{ - transfer: namada::types::eth_bridge_pool::TransferToEthereum { - kind: namada::types::eth_bridge_pool::TransferToEthereumKind::Erc20, - asset: native_erc20_addres, - recipient: namada::types::ethereum_events::EthAddress([1u8; 20]), - sender: defaults::albert_address(), - amount: Amount::from(1), - }, - gas_fee: GasFee{ - amount: Amount::from(100), - payer: defaults::albert_address(), - token: shell.wl_storage.storage.native_token.clone(), - }, - }; + let data = PendingTransfer { + transfer: namada::core::eth_bridge_pool::TransferToEthereum { + kind: + namada::core::eth_bridge_pool::TransferToEthereumKind::Erc20, + asset: native_erc20_addres, + recipient: namada::core::ethereum_events::EthAddress([1u8; 20]), + sender: defaults::albert_address(), + amount: Amount::from(1), + }, + gas_fee: GasFee { + amount: Amount::from(100), + payer: defaults::albert_address(), + token: shell.state.in_mem().native_token.clone(), + }, + 
}; shell.generate_tx( TX_BRIDGE_POOL_WASM, data, @@ -805,21 +809,23 @@ fn eth_bridge(c: &mut Criterion) { shell.execute_tx(&signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); let vp_address = Address::Internal(InternalAddress::EthBridge); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let eth_bridge = EthBridge { ctx: Ctx::new( &vp_address, - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -846,8 +852,7 @@ fn eth_bridge_pool(c: &mut Criterion) { // numerous accesses to storage that we already account for, so no need to // benchmark specific sections of it like for the ibc native vp let mut shell = BenchShell::default(); - let native_erc20_addres = - read_native_erc20_address(&shell.wl_storage).unwrap(); + let native_erc20_addres = read_native_erc20_address(&shell.state).unwrap(); // Whitelist NAM token let cap_key = whitelist::Key { @@ -855,40 +860,38 @@ fn eth_bridge_pool(c: &mut Criterion) { suffix: whitelist::KeyType::Cap, } .into(); - shell - .wl_storage - .write(&cap_key, Amount::from(1_000)) - .unwrap(); + shell.state.write(&cap_key, Amount::from(1_000)).unwrap(); let whitelisted_key = whitelist::Key { asset: native_erc20_addres, suffix: whitelist::KeyType::Whitelisted, } .into(); - shell.wl_storage.write(&whitelisted_key, true).unwrap(); + shell.state.write(&whitelisted_key, true).unwrap(); let denom_key = whitelist::Key { asset: native_erc20_addres, suffix: whitelist::KeyType::Denomination, } .into(); - shell.wl_storage.write(&denom_key, 0).unwrap(); + shell.state.write(&denom_key, 0).unwrap(); let signed_tx = 
{ - let data = PendingTransfer{ - transfer: namada::types::eth_bridge_pool::TransferToEthereum { - kind: namada::types::eth_bridge_pool::TransferToEthereumKind::Erc20, - asset: native_erc20_addres, - recipient: namada::types::ethereum_events::EthAddress([1u8; 20]), - sender: defaults::albert_address(), - amount: Amount::from(1), - }, - gas_fee: GasFee{ - amount: Amount::from(100), - payer: defaults::albert_address(), - token: shell.wl_storage.storage.native_token.clone(), - }, - }; + let data = PendingTransfer { + transfer: namada::core::eth_bridge_pool::TransferToEthereum { + kind: + namada::core::eth_bridge_pool::TransferToEthereumKind::Erc20, + asset: native_erc20_addres, + recipient: namada::core::ethereum_events::EthAddress([1u8; 20]), + sender: defaults::albert_address(), + amount: Amount::from(1), + }, + gas_fee: GasFee { + amount: Amount::from(100), + payer: defaults::albert_address(), + token: shell.state.in_mem().native_token.clone(), + }, + }; shell.generate_tx( TX_BRIDGE_POOL_WASM, data, @@ -902,21 +905,23 @@ fn eth_bridge_pool(c: &mut Criterion) { shell.execute_tx(&signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); let vp_address = Address::Internal(InternalAddress::EthBridgePool); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let bridge_pool = BridgePoolVp { ctx: Ctx::new( &vp_address, - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -955,13 +960,10 @@ fn parameters(c: &mut Criterion) { // Simulate governance proposal to modify a parameter let min_proposal_fund_key = 
namada::governance::storage::keys::get_min_proposal_fund_key(); - shell - .wl_storage - .write(&min_proposal_fund_key, 1_000) - .unwrap(); + shell.state.write(&min_proposal_fund_key, 1_000).unwrap(); let proposal_key = namada::governance::storage::keys::get_proposal_execution_key(0); - shell.wl_storage.write(&proposal_key, 0).unwrap(); + shell.state.write(&proposal_key, 0).unwrap(); // Return a dummy tx for validation let mut tx = @@ -975,21 +977,23 @@ fn parameters(c: &mut Criterion) { }; let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); let vp_address = Address::Internal(InternalAddress::Parameters); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let parameters = ParametersVp { ctx: Ctx::new( &vp_address, - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -1031,13 +1035,10 @@ fn pos(c: &mut Criterion) { // Simulate governance proposal to modify a parameter let min_proposal_fund_key = namada::governance::storage::keys::get_min_proposal_fund_key(); - shell - .wl_storage - .write(&min_proposal_fund_key, 1_000) - .unwrap(); + shell.state.write(&min_proposal_fund_key, 1_000).unwrap(); let proposal_key = namada::governance::storage::keys::get_proposal_execution_key(0); - shell.wl_storage.write(&proposal_key, 0).unwrap(); + shell.state.write(&proposal_key, 0).unwrap(); // Return a dummy tx for validation let mut tx = @@ -1051,21 +1052,23 @@ fn pos(c: &mut Criterion) { }; let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); let vp_address = 
Address::Internal(InternalAddress::PoS); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let pos = PosVP { ctx: Ctx::new( &vp_address, - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -1137,7 +1140,7 @@ fn ibc_vp_validate_action(c: &mut Criterion) { match bench_name { "open_connection" => { let _ = shell.init_ibc_client_state( - namada::core::types::storage::Key::from( + namada::core::storage::Key::from( Address::Internal(InternalAddress::Ibc).to_db_key(), ), ); @@ -1152,20 +1155,22 @@ fn ibc_vp_validate_action(c: &mut Criterion) { shell.execute_tx(signed_tx); let tx_data = signed_tx.data().unwrap(); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let ibc = Ibc { ctx: Ctx::new( &Address::Internal(InternalAddress::Ibc), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -1235,7 +1240,7 @@ fn ibc_vp_execute_action(c: &mut Criterion) { match bench_name { "open_connection" => { let _ = shell.init_ibc_client_state( - namada::core::types::storage::Key::from( + namada::core::storage::Key::from( Address::Internal(InternalAddress::Ibc).to_db_key(), ), ); @@ -1250,20 +1255,22 @@ fn ibc_vp_execute_action(c: &mut Criterion) { 
shell.execute_tx(signed_tx); let tx_data = signed_tx.data().unwrap(); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let ibc = Ibc { ctx: Ctx::new( &Address::Internal(InternalAddress::Ibc), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), diff --git a/crates/benches/process_wrapper.rs b/crates/benches/process_wrapper.rs index 4a12b34836..da26796e78 100644 --- a/crates/benches/process_wrapper.rs +++ b/crates/benches/process_wrapper.rs @@ -1,12 +1,11 @@ use criterion::{criterion_group, criterion_main, Criterion}; -use namada::core::types::address; -use namada::ledger::storage::TempWlStorage; +use namada::core::address; +use namada::core::key::RefTo; +use namada::core::storage::BlockHeight; +use namada::core::time::DateTimeUtc; use namada::token::{Amount, DenominatedAmount, Transfer}; use namada::tx::data::{Fee, WrapperTx}; use namada::tx::Signature; -use namada::types::key::RefTo; -use namada::types::storage::BlockHeight; -use namada::types::time::DateTimeUtc; use namada_apps::bench_utils::{BenchShell, TX_TRANSFER_WASM}; use namada_apps::node::ledger::shell::process_proposal::ValidationMeta; use namada_apps::wallet::defaults; @@ -15,7 +14,7 @@ fn process_tx(c: &mut Criterion) { let mut shell = BenchShell::default(); // Advance chain height to allow the inclusion of wrapper txs by the block // space allocator - shell.wl_storage.storage.last_block.as_mut().unwrap().height = + shell.state.in_mem_mut().last_block.as_mut().unwrap().height = BlockHeight(2); let mut tx = shell.generate_tx( @@ 
-23,7 +22,7 @@ fn process_tx(c: &mut Criterion) { Transfer { source: defaults::albert_address(), target: defaults::bertha_address(), - token: address::nam(), + token: address::testing::nam(), amount: Amount::native_whole(1).native_denominated(), key: None, shielded: None, @@ -36,7 +35,7 @@ fn process_tx(c: &mut Criterion) { tx.update_header(namada::tx::data::TxType::Wrapper(Box::new( WrapperTx::new( Fee { - token: address::nam(), + token: address::testing::nam(), amount_per_gas_unit: DenominatedAmount::native(1.into()), }, defaults::albert_keypair().ref_to(), @@ -60,10 +59,10 @@ fn process_tx(c: &mut Criterion) { b.iter_batched( || { ( - shell.wl_storage.storage.tx_queue.clone(), + shell.state.in_mem().tx_queue.clone(), // Prevent block out of gas and replay protection - TempWlStorage::new(&shell.wl_storage.storage), - ValidationMeta::from(&shell.wl_storage), + shell.state.with_temp_write_log(), + ValidationMeta::from(shell.state.read_only()), shell.vp_wasm_cache.clone(), shell.tx_wasm_cache.clone(), defaults::daewon_address(), @@ -71,7 +70,7 @@ fn process_tx(c: &mut Criterion) { }, |( tx_queue, - mut temp_wl_storage, + mut temp_state, mut validation_meta, mut vp_wasm_cache, mut tx_wasm_cache, @@ -84,7 +83,7 @@ fn process_tx(c: &mut Criterion) { &wrapper, &mut tx_queue.iter(), &mut validation_meta, - &mut temp_wl_storage, + &mut temp_state, datetime, &mut vp_wasm_cache, &mut tx_wasm_cache, diff --git a/crates/benches/txs.rs b/crates/benches/txs.rs index 6670fd2420..73c0faffbf 100644 --- a/crates/benches/txs.rs +++ b/crates/benches/txs.rs @@ -3,9 +3,15 @@ use std::str::FromStr; use criterion::{criterion_group, criterion_main, Criterion}; use namada::account::{InitAccount, UpdateAccount}; -use namada::core::types::key::{ - common, SecretKey as SecretKeyInterface, SigScheme, +use namada::core::address::{self, Address}; +use namada::core::eth_bridge_pool::{GasFee, PendingTransfer}; +use namada::core::hash::Hash; +use namada::core::key::{ + common, ed25519, secp256k1, 
PublicKey, RefTo, + SecretKey as SecretKeyInterface, SigScheme, }; +use namada::core::masp::{TransferSource, TransferTarget}; +use namada::core::storage::Key; use namada::governance::pgf::storage::steward::StewardDetail; use namada::governance::storage::proposal::ProposalType; use namada::governance::storage::vote::ProposalVote; @@ -31,12 +37,6 @@ use namada::tx::data::pos::{ MetaDataChange, Redelegation, Withdraw, }; use namada::tx::{Code, Section}; -use namada::types::address::{self, Address}; -use namada::types::eth_bridge_pool::{GasFee, PendingTransfer}; -use namada::types::hash::Hash; -use namada::types::key::{ed25519, secp256k1, PublicKey, RefTo}; -use namada::types::masp::{TransferSource, TransferTarget}; -use namada::types::storage::Key; use namada_apps::bench_utils::{ BenchShell, BenchShieldedCtx, ALBERT_PAYMENT_ADDRESS, ALBERT_SPENDING_KEY, BERTHA_PAYMENT_ADDRESS, TX_BECOME_VALIDATOR_WASM, TX_BOND_WASM, @@ -278,13 +278,12 @@ fn withdraw(c: &mut Criterion) { }; shell.execute_tx(&unbond_tx); - shell.wl_storage.commit_tx(); + shell.state.commit_tx(); // Advance Epoch for pipeline and unbonding length - let params = proof_of_stake::storage::read_pos_params( - &shell.wl_storage, - ) - .unwrap(); + let params = + proof_of_stake::storage::read_pos_params(&shell.state) + .unwrap(); let advance_epochs = params.pipeline_len + params.unbonding_len; @@ -326,8 +325,8 @@ fn redelegate(c: &mut Criterion) { || { let shell = BenchShell::default(); // Find the other genesis validator - let current_epoch = shell.wl_storage.get_block_epoch().unwrap(); - let validators = namada::proof_of_stake::storage::read_consensus_validator_set_addresses(&shell.inner.wl_storage, current_epoch).unwrap(); + let current_epoch = shell.state.get_block_epoch().unwrap(); + let validators = namada::proof_of_stake::storage::read_consensus_validator_set_addresses(&shell.inner.state, current_epoch).unwrap(); let validator_2 = validators.into_iter().find(|addr| addr != 
&defaults::validator_address()).expect("There must be another validator to redelegate to"); // Prepare the redelegation tx (shell, redelegation(validator_2)) @@ -458,7 +457,7 @@ fn init_proposal(c: &mut Criterion) { let max_proposal_content_key = namada::governance::storage::keys::get_max_proposal_content_key(); let max_code_size: u64 = shell - .wl_storage + .state .read(&max_code_size_key) .expect("Error while reading from storage") .expect( @@ -466,7 +465,7 @@ fn init_proposal(c: &mut Criterion) { storage", ); let max_proposal_content_size: u64 = shell - .wl_storage + .state .read(&max_proposal_content_key) .expect("Error while reading from storage") .expect( @@ -596,8 +595,8 @@ fn become_validator(c: &mut Criterion) { eth_cold_key, eth_hot_key, protocol_key, - commission_rate: namada::types::dec::Dec::default(), - max_commission_rate_change: namada::types::dec::Dec::default(), + commission_rate: namada::core::dec::Dec::default(), + max_commission_rate_change: namada::core::dec::Dec::default(), email: "null@null.net".to_string(), description: None, website: None, @@ -624,9 +623,9 @@ fn become_validator(c: &mut Criterion) { let mut shell = BenchShell::default(); // Initialize the account to be able to use it shell - .wl_storage + .state .write_bytes( - &namada::types::storage::Key::validity_predicate( + &namada::core::storage::Key::validity_predicate( &address, ), vec![], @@ -646,7 +645,7 @@ fn change_validator_commission(c: &mut Criterion) { TX_CHANGE_VALIDATOR_COMMISSION_WASM, CommissionChange { validator: defaults::validator_address(), - new_rate: namada::types::dec::Dec::new(6, 2).unwrap(), + new_rate: namada::core::dec::Dec::new(6, 2).unwrap(), }, None, None, @@ -773,8 +772,8 @@ fn ibc(c: &mut Criterion) { match bench_name { "open_connection" => { let _ = shell.init_ibc_client_state( - namada::core::types::storage::Key::from( - Address::Internal(namada::types::address::InternalAddress::Ibc).to_db_key(), + namada::core::storage::Key::from( + 
Address::Internal(namada::core::address::InternalAddress::Ibc).to_db_key(), ), ); } @@ -811,11 +810,11 @@ fn unjail_validator(c: &mut Criterion) { let mut shell = BenchShell::default(); // Jail the validator - let pos_params = read_pos_params(&shell.wl_storage).unwrap(); - let current_epoch = shell.wl_storage.storage.block.epoch; + let pos_params = read_pos_params(&shell.state).unwrap(); + let current_epoch = shell.state.in_mem().block.epoch; let evidence_epoch = current_epoch.prev(); proof_of_stake::slashing::slash( - &mut shell.wl_storage, + &mut shell.state, &pos_params, current_epoch, evidence_epoch, @@ -826,7 +825,7 @@ fn unjail_validator(c: &mut Criterion) { ) .unwrap(); - shell.wl_storage.commit_tx(); + shell.state.commit_tx(); shell.commit_block(); // Advance by slash epoch offset epochs for _ in 0..=pos_params.slash_processing_epoch_offset() { @@ -845,17 +844,17 @@ fn tx_bridge_pool(c: &mut Criterion) { let shell = BenchShell::default(); let data = PendingTransfer { - transfer: namada::types::eth_bridge_pool::TransferToEthereum { - kind: namada::types::eth_bridge_pool::TransferToEthereumKind::Erc20, - asset: read_native_erc20_address(&shell.wl_storage).unwrap(), - recipient: namada::types::ethereum_events::EthAddress([1u8; 20]), + transfer: namada::core::eth_bridge_pool::TransferToEthereum { + kind: namada::core::eth_bridge_pool::TransferToEthereumKind::Erc20, + asset: read_native_erc20_address(&shell.state).unwrap(), + recipient: namada::core::ethereum_events::EthAddress([1u8; 20]), sender: defaults::albert_address(), amount: Amount::from(1), }, gas_fee: GasFee { amount: Amount::from(100), payer: defaults::albert_address(), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, }; let tx = shell.generate_tx( @@ -881,7 +880,7 @@ fn resign_steward(c: &mut Criterion) { let mut shell = BenchShell::default(); namada::governance::pgf::storage::keys::stewards_handle() .insert( - &mut shell.wl_storage, + 
&mut shell.state, defaults::albert_address(), StewardDetail::base(defaults::albert_address()), ) @@ -910,7 +909,7 @@ fn update_steward_commission(c: &mut Criterion) { let mut shell = BenchShell::default(); namada::governance::pgf::storage::keys::stewards_handle() .insert( - &mut shell.wl_storage, + &mut shell.state, defaults::albert_address(), StewardDetail::base(defaults::albert_address()), ) @@ -920,7 +919,7 @@ fn update_steward_commission(c: &mut Criterion) { steward: defaults::albert_address(), commission: HashMap::from([( defaults::albert_address(), - namada::types::dec::Dec::zero(), + namada::core::dec::Dec::zero(), )]), }; let tx = shell.generate_tx( @@ -974,16 +973,16 @@ fn reactivate_validator(c: &mut Criterion) { let mut shell = BenchShell::default(); // Deactivate the validator - let pos_params = read_pos_params(&shell.wl_storage).unwrap(); - let current_epoch = shell.wl_storage.storage.block.epoch; + let pos_params = read_pos_params(&shell.state).unwrap(); + let current_epoch = shell.state.in_mem().block.epoch; proof_of_stake::deactivate_validator( - &mut shell.wl_storage, + &mut shell.state, &defaults::validator_address(), current_epoch, ) .unwrap(); - shell.wl_storage.commit_tx(); + shell.state.commit_tx(); shell.commit_block(); // Advance by slash epoch offset epochs for _ in 0..=pos_params.pipeline_len { @@ -1033,10 +1032,9 @@ fn claim_rewards(c: &mut Criterion) { let mut shell = BenchShell::default(); // Advance Epoch for pipeline and unbonding length - let params = proof_of_stake::storage::read_pos_params( - &shell.wl_storage, - ) - .unwrap(); + let params = + proof_of_stake::storage::read_pos_params(&shell.state) + .unwrap(); let advance_epochs = params.pipeline_len + params.unbonding_len; diff --git a/crates/benches/vps.rs b/crates/benches/vps.rs index d95c3f5747..3d320c042e 100644 --- a/crates/benches/vps.rs +++ b/crates/benches/vps.rs @@ -1,19 +1,19 @@ +use std::cell::RefCell; use std::collections::BTreeSet; use criterion::{criterion_group, 
criterion_main, Criterion}; use namada::account::UpdateAccount; -use namada::core::types::address::{self, Address}; -use namada::core::types::key::{ - common, SecretKey as SecretKeyInterface, SigScheme, +use namada::core::address::{self, Address}; +use namada::core::hash::Hash; +use namada::core::key::{ + common, ed25519, SecretKey as SecretKeyInterface, SigScheme, }; +use namada::core::storage::{Key, TxIndex}; use namada::governance::storage::vote::ProposalVote; use namada::governance::VoteProposalData; use namada::ledger::gas::{TxGasMeter, VpGasMeter}; use namada::token::{Amount, Transfer}; use namada::tx::data::pos::{Bond, CommissionChange}; -use namada::types::hash::Hash; -use namada::types::key::ed25519; -use namada::types::storage::{Key, TxIndex}; use namada::vm::wasm::run; use namada_apps::bench_utils::{ generate_foreign_key_tx, BenchShell, TX_BOND_WASM, @@ -40,7 +40,7 @@ fn vp_user(c: &mut Criterion) { Transfer { source: defaults::albert_address(), target: defaults::bertha_address(), - token: address::nam(), + token: address::testing::nam(), amount: Amount::native_whole(1000).native_denominated(), key: None, shielded: None, @@ -55,7 +55,7 @@ fn vp_user(c: &mut Criterion) { Transfer { source: defaults::bertha_address(), target: defaults::albert_address(), - token: address::nam(), + token: address::testing::nam(), amount: Amount::native_whole(1000).native_denominated(), key: None, shielded: None, @@ -123,12 +123,15 @@ fn vp_user(c: &mut Criterion) { let mut shell = BenchShell::default(); shell.execute_tx(signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); group.bench_function(bench_name, |b| { b.iter(|| { + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); assert!( // NOTE: the wasm code is always in cache so we don't // include here the cost to read and compile the vp code @@ -137,11 +140,8 @@ 
fn vp_user(c: &mut Criterion) { signed_tx, &TxIndex(0), &defaults::albert_address(), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, - &mut VpGasMeter::new_from_tx_meter( - &TxGasMeter::new_from_sub_limit(u64::MAX.into()) - ), + &shell.state, + &gas_meter, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -173,7 +173,7 @@ fn vp_implicit(c: &mut Criterion) { Transfer { source: Address::from(&implicit_account.to_public()), target: defaults::bertha_address(), - token: address::nam(), + token: address::testing::nam(), amount: Amount::native_whole(500).native_denominated(), key: None, shielded: None, @@ -188,7 +188,7 @@ fn vp_implicit(c: &mut Criterion) { Transfer { source: defaults::bertha_address(), target: Address::from(&implicit_account.to_public()), - token: address::nam(), + token: address::testing::nam(), amount: Amount::native_whole(1000).native_denominated(), key: None, shielded: None, @@ -200,7 +200,7 @@ fn vp_implicit(c: &mut Criterion) { let reveal_pk = shell.generate_tx( TX_REVEAL_PK_WASM, - &implicit_account.to_public(), + implicit_account.to_public(), None, None, vec![], @@ -257,37 +257,37 @@ fn vp_implicit(c: &mut Criterion) { if bench_name != "reveal_pk" { // Reveal public key shell.execute_tx(&reveal_pk); - shell.wl_storage.commit_tx(); + shell.state.commit_tx(); shell.commit_block(); } if bench_name == "transfer" || bench_name == "pos" { // Transfer some tokens to the implicit address shell.execute_tx(&received_transfer); - shell.wl_storage.commit_tx(); + shell.state.commit_tx(); shell.commit_block(); } // Run the tx to validate shell.execute_tx(tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); group.bench_function(bench_name, |b| { b.iter(|| { + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); assert!( run::vp( vp_code_hash, tx, &TxIndex(0), 
&Address::from(&implicit_account.to_public()), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, - &mut VpGasMeter::new_from_tx_meter( - &TxGasMeter::new_from_sub_limit(u64::MAX.into()) - ), + &shell.state, + &gas_meter, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -316,7 +316,7 @@ fn vp_validator(c: &mut Criterion) { Transfer { source: defaults::validator_address(), target: defaults::bertha_address(), - token: address::nam(), + token: address::testing::nam(), amount: Amount::native_whole(1000).native_denominated(), key: None, shielded: None, @@ -331,7 +331,7 @@ fn vp_validator(c: &mut Criterion) { Transfer { source: defaults::bertha_address(), target: defaults::validator_address(), - token: address::nam(), + token: address::testing::nam(), amount: Amount::native_whole(1000).native_denominated(), key: None, shielded: None, @@ -358,7 +358,7 @@ fn vp_validator(c: &mut Criterion) { TX_CHANGE_VALIDATOR_COMMISSION_WASM, CommissionChange { validator: defaults::validator_address(), - new_rate: namada::types::dec::Dec::new(6, 2).unwrap(), + new_rate: namada::core::dec::Dec::new(6, 2).unwrap(), }, None, None, @@ -413,23 +413,23 @@ fn vp_validator(c: &mut Criterion) { shell.execute_tx(signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); group.bench_function(bench_name, |b| { b.iter(|| { + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); assert!( run::vp( vp_code_hash, signed_tx, &TxIndex(0), &defaults::validator_address(), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, - &mut VpGasMeter::new_from_tx_meter( - &TxGasMeter::new_from_sub_limit(u64::MAX.into()) - ), + &shell.state, + &gas_meter, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index 67271d6728..bb4437f87d 100644 --- a/crates/core/Cargo.toml +++ 
b/crates/core/Cargo.toml @@ -46,14 +46,12 @@ index-set.workspace = true k256.workspace = true masp_primitives.workspace = true num256.workspace = true -num-derive.workspace = true num_enum = "0.7.0" num-integer = "0.1.45" num-rational.workspace = true num-traits.workspace = true primitive-types.workspace = true proptest = {workspace = true, optional = true} -prost.workspace = true prost-types.workspace = true rand = {version = "0.8", optional = true} rand_core = {version = "0.6", optional = true} diff --git a/crates/core/src/types/account.rs b/crates/core/src/account.rs similarity index 100% rename from crates/core/src/types/account.rs rename to crates/core/src/account.rs diff --git a/crates/core/src/types/address.rs b/crates/core/src/address.rs similarity index 90% rename from crates/core/src/types/address.rs rename to crates/core/src/address.rs index 7626d3c0b6..1a6b524dba 100644 --- a/crates/core/src/types/address.rs +++ b/crates/core/src/address.rs @@ -3,7 +3,6 @@ mod raw; -use std::collections::HashMap; use std::fmt::{Debug, Display}; use std::hash::Hash; use std::str::FromStr; @@ -14,13 +13,11 @@ use data_encoding::HEXUPPER; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; +use crate::ethereum_events::EthAddress; use crate::ibc::primitives::Signer; -use crate::impl_display_and_from_str_via_format; -use crate::types::ethereum_events::EthAddress; -use crate::types::ibc::IbcTokenHash; -use crate::types::key::PublicKeyHash; -use crate::types::token::Denomination; -use crate::types::{key, string_encoding}; +use crate::ibc::IbcTokenHash; +use crate::key::PublicKeyHash; +use crate::{impl_display_and_from_str_via_format, key, string_encoding}; /// The length of an established [`Address`] encoded with Borsh. 
pub const ESTABLISHED_ADDRESS_BYTES_LEN: usize = 21; @@ -44,7 +41,7 @@ pub const HASH_LEN: usize = 20; /// use sha2::Digest; /// assert_eq!( /// sha2::Sha256::output_size(), -/// namada_core::types::address::SHA_HASH_LEN +/// namada_core::address::SHA_HASH_LEN /// ); /// ``` pub const SHA_HASH_LEN: usize = 32; @@ -64,6 +61,8 @@ pub const GOV: Address = Address::Internal(InternalAddress::Governance); pub const MASP: Address = Address::Internal(InternalAddress::Masp); /// Internal Multitoken address pub const MULTITOKEN: Address = Address::Internal(InternalAddress::Multitoken); +/// Internal Eth bridge address +pub const ETH_BRIDGE: Address = Address::Internal(InternalAddress::EthBridge); /// Error from decoding address from string pub type DecodeError = string_encoding::DecodeError; @@ -229,7 +228,7 @@ impl<'addr> From<&'addr Address> for raw::Address<'addr, raw::Validated> { // the order. impl PartialOrd for Address { fn partial_cmp(&self, other: &Self) -> Option { - self.encode().partial_cmp(&other.encode()) + Some(self.cmp(other)) } } @@ -364,8 +363,7 @@ impl TryFrom for Address { // sending a token from a spending key, it has been already // replaced with the MASP address. 
Address::decode(signer.as_ref()).or( - match crate::types::masp::PaymentAddress::from_str(signer.as_ref()) - { + match crate::masp::PaymentAddress::from_str(signer.as_ref()) { Ok(_) => Ok(MASP), Err(_) => Err(DecodeError::InvalidInnerEncoding(format!( "Invalid address for IBC transfer: {signer}" @@ -586,74 +584,6 @@ impl InternalAddress { } } -/// Temporary helper for testing -pub fn nam() -> Address { - Address::decode("tnam1q99c37u38grkdcc2qze0hz4zjjd8zr3yucd3mzgz") - .expect("The token address decoding shouldn't fail") -} - -/// Temporary helper for testing -pub fn btc() -> Address { - Address::decode("tnam1qy7jxng788scr4fdqxqxtc2ze2guq5478cml9cd9") - .expect("The token address decoding shouldn't fail") -} - -/// Temporary helper for testing -pub fn eth() -> Address { - Address::decode("tnam1qyr9vd8ltunq72qc7pk58v7jdsedt4mggqqpxs03") - .expect("The token address decoding shouldn't fail") -} - -/// Temporary helper for testing -pub fn dot() -> Address { - Address::decode("tnam1qx6k4wau5t6m8g2hjq55fje2ynpvh5t27s8p3p0l") - .expect("The token address decoding shouldn't fail") -} - -/// Temporary helper for testing -pub fn schnitzel() -> Address { - Address::decode("tnam1q9euzsu2qfv4y6p0dqaga20n0u0yp8c3ec006yg2") - .expect("The token address decoding shouldn't fail") -} - -/// Temporary helper for testing -pub fn apfel() -> Address { - Address::decode("tnam1qxlmdmw2y6hzvjg34zca8r6d4s6zmtkhty8myzu4") - .expect("The token address decoding shouldn't fail") -} - -/// Temporary helper for testing -pub fn kartoffel() -> Address { - Address::decode("tnam1q87teqzjytwa9xd9qk8u558xxnrwuzdjzs7zvhzr") - .expect("The token address decoding shouldn't fail") -} - -/// Temporary helper for testing -pub const fn wnam() -> EthAddress { - // TODO: Replace this with the real wNam ERC20 address once it exists - // "DEADBEEF DEADBEEF DEADBEEF DEADBEEF DEADBEEF" - EthAddress([ - 222, 173, 190, 239, 222, 173, 190, 239, 222, 173, 190, 239, 222, 173, - 190, 239, 222, 173, 190, 239, - ]) -} - 
-/// Temporary helper for testing, a hash map of tokens addresses with their -/// informal currency codes and number of decimal places. -pub fn tokens() -> HashMap<&'static str, Denomination> { - vec![ - ("nam", 6.into()), - ("btc", 8.into()), - ("eth", 18.into()), - ("dot", 10.into()), - ("schnitzel", 6.into()), - ("apfel", 6.into()), - ("kartoffel", 6.into()), - ] - .into_iter() - .collect() -} - #[cfg(test)] pub mod tests { use proptest::prelude::*; @@ -725,7 +655,7 @@ pub fn gen_established_address(seed: impl AsRef) -> Address { let mut key_gen = EstablishedAddressGen::new(seed); let mut rng: ThreadRng = thread_rng(); - let mut rng_bytes = vec![0u8; 32]; + let mut rng_bytes = [0u8; 32]; rng.fill_bytes(&mut rng_bytes[..]); let rng_source = rng_bytes .iter() @@ -745,10 +675,13 @@ pub fn gen_deterministic_established_address(seed: impl AsRef) -> Address { /// Helpers for testing with addresses. #[cfg(any(test, feature = "testing"))] pub mod testing { + use std::collections::HashMap; + use proptest::prelude::*; use super::*; - use crate::types::key::*; + use crate::key::*; + use crate::token::Denomination; /// Generate a new established address. 
pub fn gen_established_address() -> Address { @@ -897,14 +830,82 @@ pub mod testing { } fn arb_erc20() -> InternalAddress { - use crate::types::ethereum_events::testing::arbitrary_eth_address; + use crate::ethereum_events::testing::arbitrary_eth_address; // TODO: generate random erc20 addr data InternalAddress::Erc20(arbitrary_eth_address()) } fn arb_nut() -> InternalAddress { - use crate::types::ethereum_events::testing::arbitrary_eth_address; + use crate::ethereum_events::testing::arbitrary_eth_address; // TODO: generate random erc20 addr data InternalAddress::Nut(arbitrary_eth_address()) } + + /// NAM token address for testing + pub fn nam() -> Address { + Address::decode("tnam1q99c37u38grkdcc2qze0hz4zjjd8zr3yucd3mzgz") + .expect("The token address decoding shouldn't fail") + } + + /// BTC token address for testing + pub fn btc() -> Address { + Address::decode("tnam1qy7jxng788scr4fdqxqxtc2ze2guq5478cml9cd9") + .expect("The token address decoding shouldn't fail") + } + + /// ETH token address for testing + pub fn eth() -> Address { + Address::decode("tnam1qyr9vd8ltunq72qc7pk58v7jdsedt4mggqqpxs03") + .expect("The token address decoding shouldn't fail") + } + + /// DOT token address for testing + pub fn dot() -> Address { + Address::decode("tnam1qx6k4wau5t6m8g2hjq55fje2ynpvh5t27s8p3p0l") + .expect("The token address decoding shouldn't fail") + } + + /// Imaginary token address for testing + pub fn schnitzel() -> Address { + Address::decode("tnam1q9euzsu2qfv4y6p0dqaga20n0u0yp8c3ec006yg2") + .expect("The token address decoding shouldn't fail") + } + + /// Imaginary token address for testing + pub fn apfel() -> Address { + Address::decode("tnam1qxlmdmw2y6hzvjg34zca8r6d4s6zmtkhty8myzu4") + .expect("The token address decoding shouldn't fail") + } + + /// Imaginary token address for testing + pub fn kartoffel() -> Address { + Address::decode("tnam1q87teqzjytwa9xd9qk8u558xxnrwuzdjzs7zvhzr") + .expect("The token address decoding shouldn't fail") + } + + /// Imaginary eth 
address for testing + pub const fn wnam() -> EthAddress { + // TODO: Replace this with the real wNam ERC20 address once it exists + // "DEADBEEF DEADBEEF DEADBEEF DEADBEEF DEADBEEF" + EthAddress([ + 222, 173, 190, 239, 222, 173, 190, 239, 222, 173, 190, 239, 222, + 173, 190, 239, 222, 173, 190, 239, + ]) + } + + /// A hash map of tokens addresses with their informal currency codes and + /// number of decimal places. + pub fn tokens() -> HashMap<&'static str, Denomination> { + vec![ + ("nam", 6.into()), + ("btc", 8.into()), + ("eth", 18.into()), + ("dot", 10.into()), + ("schnitzel", 6.into()), + ("apfel", 6.into()), + ("kartoffel", 6.into()), + ] + .into_iter() + .collect() + } } diff --git a/crates/core/src/types/address/raw.rs b/crates/core/src/address/raw.rs similarity index 100% rename from crates/core/src/types/address/raw.rs rename to crates/core/src/address/raw.rs diff --git a/crates/core/src/types/chain.rs b/crates/core/src/chain.rs similarity index 100% rename from crates/core/src/types/chain.rs rename to crates/core/src/chain.rs diff --git a/crates/core/src/types/dec.rs b/crates/core/src/dec.rs similarity index 99% rename from crates/core/src/types/dec.rs rename to crates/core/src/dec.rs index 5b32b484e5..858e94f327 100644 --- a/crates/core/src/types/dec.rs +++ b/crates/core/src/dec.rs @@ -14,8 +14,8 @@ use num_traits::CheckedMul; use serde::{Deserialize, Serialize}; use super::token::NATIVE_MAX_DECIMAL_PLACES; -use crate::types::token::{Amount, Change}; -use crate::types::uint::{Uint, I256}; +use crate::token::{Amount, Change}; +use crate::uint::{Uint, I256}; /// The number of Dec places for PoS rational calculations pub const POS_DECIMAL_PRECISION: u8 = 12; @@ -69,7 +69,7 @@ impl Dec { /// /// Example: /// ``` - /// use namada_core::types::dec::Dec; + /// use namada_core::dec::Dec; /// /// let x = Dec::new(3, 1).unwrap(); // Represents 0.3 /// let y = Dec::new(2, 1).unwrap(); // Represents 0.2 @@ -549,7 +549,6 @@ pub mod testing { #[cfg(test)] mod 
test_dec { use super::*; - use crate::types::token::{Amount, Change}; #[derive(Debug, Serialize, Deserialize)] struct SerializerTest { diff --git a/crates/core/src/types/eth_abi.rs b/crates/core/src/eth_abi.rs similarity index 95% rename from crates/core/src/types/eth_abi.rs rename to crates/core/src/eth_abi.rs index 886b5b9c46..3d460a25ae 100644 --- a/crates/core/src/types/eth_abi.rs +++ b/crates/core/src/eth_abi.rs @@ -7,8 +7,8 @@ use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; #[doc(inline)] pub use ethabi::token::Token; -use crate::types::keccak::{keccak_hash, KeccakHash}; -use crate::types::key::{Signable, SignableEthMessage}; +use crate::keccak::{keccak_hash, KeccakHash}; +use crate::key::{Signable, SignableEthMessage}; /// A container for data types that are able to be Ethereum ABI-encoded. #[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshSchema)] @@ -40,7 +40,7 @@ impl ::std::cmp::PartialEq for EncodeCell { impl ::std::cmp::PartialOrd for EncodeCell { fn partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> { - self.encoded_data.partial_cmp(&other.encoded_data) + Some(self.cmp(other)) } } @@ -121,7 +121,6 @@ impl Encode for AbiEncode { // TODO: test signatures here once we merge secp keys #[cfg(test)] mod tests { - use std::convert::TryInto; use std::str::FromStr; use data_encoding::HEXLOWER; @@ -129,7 +128,7 @@ mod tests { use tiny_keccak::{Hasher, Keccak}; use super::*; - use crate::types::ethereum_events::EthAddress; + use crate::ethereum_events::EthAddress; /// Checks if we get the same result as `abi.encode`, for some given /// input data. 
diff --git a/crates/core/src/types/eth_bridge_pool.rs b/crates/core/src/eth_bridge_pool.rs similarity index 96% rename from crates/core/src/types/eth_bridge_pool.rs rename to crates/core/src/eth_bridge_pool.rs index 83bcef6c6a..86fa7daaea 100644 --- a/crates/core/src/types/eth_bridge_pool.rs +++ b/crates/core/src/eth_bridge_pool.rs @@ -13,14 +13,14 @@ use super::address::InternalAddress; use super::keccak::KeccakHash; use super::storage::{self, KeySeg}; use crate as namada_core; // This is needed for `StorageKeys` macro -use crate::types::address::Address; -use crate::types::eth_abi::Encode; -use crate::types::ethereum_events::{ +use crate::address::Address; +use crate::eth_abi::Encode; +use crate::ethereum_events::{ EthAddress, TransferToEthereum as TransferToEthereumEvent, }; -use crate::types::hash::Hash as HashDigest; -use crate::types::storage::{DbKeySeg, Key}; -use crate::types::token::Amount; +use crate::hash::Hash as HashDigest; +use crate::storage::{DbKeySeg, Key}; +use crate::token::Amount; /// The main address of the Ethereum bridge pool pub const BRIDGE_POOL_ADDRESS: Address = @@ -364,11 +364,11 @@ pub mod testing { use proptest::strategy::Strategy; use super::*; - use crate::types::address::testing::{ + use crate::address::testing::{ arb_established_address, arb_non_internal_address, }; - use crate::types::ethereum_events::testing::arb_eth_address; - use crate::types::token::testing::arb_amount; + use crate::ethereum_events::testing::arb_eth_address; + use crate::token::testing::arb_amount; prop_compose! { /// Generate an arbitrary pending transfer @@ -434,8 +434,7 @@ pub mod testing { #[cfg(test)] mod test_eth_bridge_pool_types { use super::*; - use crate::types::address::nam; - use crate::types::address::testing::established_address_1; + use crate::address::testing::{established_address_1, nam}; /// Test that [`PendingTransfer`] and [`TransferToEthereum`] /// have the same keccak hash, after being ABI encoded. 
diff --git a/crates/core/src/types/ethereum_events.rs b/crates/core/src/ethereum_events.rs similarity index 95% rename from crates/core/src/types/ethereum_events.rs rename to crates/core/src/ethereum_events.rs index 8569b118e4..3a381c401a 100644 --- a/crates/core/src/types/ethereum_events.rs +++ b/crates/core/src/ethereum_events.rs @@ -12,13 +12,13 @@ use ethabi::Token; use eyre::{eyre, Context}; use serde::{Deserialize, Serialize}; -use crate::types::address::Address; -use crate::types::eth_abi::Encode; -use crate::types::ethereum_structs::Erc20Transfer; -use crate::types::hash::Hash; -use crate::types::keccak::KeccakHash; -use crate::types::storage::{DbKeySeg, KeySeg}; -use crate::types::token::Amount; +use crate::address::Address; +use crate::eth_abi::Encode; +use crate::ethereum_structs::Erc20Transfer; +use crate::hash::Hash; +use crate::keccak::KeccakHash; +use crate::storage::{DbKeySeg, KeySeg}; +use crate::token::Amount; /// Namada native type to replace the ethabi::Uint type #[derive( @@ -40,7 +40,7 @@ pub struct Uint(pub [u64; 4]); impl PartialOrd for Uint { #[inline] fn partial_cmp(&self, other: &Self) -> Option { - ethUint(self.0).partial_cmp(ðUint(other.0)) + Some(self.cmp(other)) } } @@ -197,9 +197,9 @@ impl From for String { } impl KeySeg for EthAddress { - fn parse(string: String) -> crate::types::storage::Result { + fn parse(string: String) -> crate::storage::Result { Self::from_str(string.as_str()) - .map_err(|_| crate::types::storage::Error::ParseKeySeg(string)) + .map_err(|_| crate::storage::Error::ParseKeySeg(string)) } fn raw(&self) -> String { @@ -373,7 +373,7 @@ impl From for TransferToEthereum { Self { amount: { let uint = { - use crate::types::uint::Uint as NamadaUint; + use crate::uint::Uint as NamadaUint; let mut num_buf = [0; 32]; transfer.amount.to_little_endian(&mut num_buf); NamadaUint::from_little_endian(&num_buf) @@ -390,7 +390,6 @@ impl From for TransferToEthereum { #[cfg(test)] pub mod tests { - use std::str::FromStr; use 
super::*; @@ -450,7 +449,7 @@ pub mod testing { use proptest::prop_compose; use super::*; - use crate::types::token::{self, Amount}; + use crate::token; pub const DAI_ERC20_ETH_ADDRESS_CHECKSUMMED: &str = "0x6B175474E89094C44Da98b954EedeAC495271d0F"; diff --git a/crates/core/src/types/ethereum_structs.rs b/crates/core/src/ethereum_structs.rs similarity index 98% rename from crates/core/src/types/ethereum_structs.rs rename to crates/core/src/ethereum_structs.rs index 5b29e0e588..e2bd8a5ce0 100644 --- a/crates/core/src/types/ethereum_structs.rs +++ b/crates/core/src/ethereum_structs.rs @@ -9,7 +9,7 @@ pub use ethbridge_structs::*; use num256::Uint256; use serde::{Deserialize, Serialize}; -use crate::types::keccak::KeccakHash; +use crate::keccak::KeccakHash; /// Status of some Bridge pool transfer. #[derive( diff --git a/crates/core/src/event.rs b/crates/core/src/event.rs new file mode 100644 index 0000000000..d82121de50 --- /dev/null +++ b/crates/core/src/event.rs @@ -0,0 +1,207 @@ +//! Ledger events + +use std::collections::HashMap; +use std::fmt::{self, Display}; +use std::ops::{Index, IndexMut}; +use std::str::FromStr; + +use thiserror::Error; + +use crate::borsh::{BorshDeserialize, BorshSerialize}; +use crate::ethereum_structs::{BpTransferStatus, EthBridgeEvent}; +use crate::ibc::IbcEvent; + +/// Used in sub-systems that may emit events. +pub trait EmitEvents { + /// Emit an event + fn emit(&mut self, value: Event); +} + +impl EmitEvents for Vec { + fn emit(&mut self, value: Event) { + Vec::push(self, value) + } +} + +/// Indicates if an event is emitted do to +/// an individual Tx or the nature of a finalized block +#[derive(Clone, Debug, Eq, PartialEq, BorshSerialize, BorshDeserialize)] +pub enum EventLevel { + /// Indicates an event is to do with a finalized block. + Block, + /// Indicates an event is to do with an individual transaction. 
+ Tx, +} + +/// Custom events that can be queried from Tendermint +/// using a websocket client +#[derive(Clone, Debug, Eq, PartialEq, BorshSerialize, BorshDeserialize)] +pub struct Event { + /// The type of event. + pub event_type: EventType, + /// The level of the event - whether it relates to a block or an individual + /// transaction. + pub level: EventLevel, + /// Key-value attributes of the event. + pub attributes: HashMap, +} + +/// The two types of custom events we currently use +#[derive(Clone, Debug, Eq, PartialEq, BorshSerialize, BorshDeserialize)] +pub enum EventType { + /// The transaction was accepted to be included in a block + Accepted, + /// The transaction was applied during block finalization + Applied, + /// The IBC transaction was applied during block finalization + Ibc(String), + /// The proposal that has been executed + Proposal, + /// The pgf payment + PgfPayment, + /// Ethereum Bridge event + EthereumBridge, +} + +impl Display for EventType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + EventType::Accepted => write!(f, "accepted"), + EventType::Applied => write!(f, "applied"), + EventType::Ibc(t) => write!(f, "{}", t), + EventType::Proposal => write!(f, "proposal"), + EventType::PgfPayment => write!(f, "pgf_payment"), + EventType::EthereumBridge => write!(f, "ethereum_bridge"), + }?; + Ok(()) + } +} + +impl FromStr for EventType { + type Err = EventError; + + fn from_str(s: &str) -> Result { + match s { + "accepted" => Ok(EventType::Accepted), + "applied" => Ok(EventType::Applied), + "proposal" => Ok(EventType::Proposal), + "pgf_payments" => Ok(EventType::PgfPayment), + // IBC + "update_client" => Ok(EventType::Ibc("update_client".to_string())), + "send_packet" => Ok(EventType::Ibc("send_packet".to_string())), + "write_acknowledgement" => { + Ok(EventType::Ibc("write_acknowledgement".to_string())) + } + "ethereum_bridge" => Ok(EventType::EthereumBridge), + _ => Err(EventError::InvalidEventType), + } + } +} + 
+/// Errors to do with emitting events. +#[derive(Error, Debug, Clone)] +pub enum EventError { + /// Error when parsing an event type + #[error("Invalid event type")] + InvalidEventType, + /// Error when parsing attributes from an event JSON. + #[error("Json missing `attributes` field")] + MissingAttributes, + /// Missing key in attributes. + #[error("Attributes missing key: {0}")] + MissingKey(String), + /// Missing value in attributes. + #[error("Attributes missing value: {0}")] + MissingValue(String), +} + +impl Event { + /// Check if the events keys contains a given string + pub fn contains_key(&self, key: &str) -> bool { + self.attributes.contains_key(key) + } + + /// Get the value corresponding to a given key, if it exists. + /// Else return None. + pub fn get(&self, key: &str) -> Option<&String> { + self.attributes.get(key) + } +} + +impl From for Event { + #[inline] + fn from(event: EthBridgeEvent) -> Event { + Self::from(&event) + } +} + +impl From<&EthBridgeEvent> for Event { + fn from(event: &EthBridgeEvent) -> Event { + match event { + EthBridgeEvent::BridgePool { tx_hash, status } => Event { + event_type: EventType::EthereumBridge, + level: EventLevel::Tx, + attributes: { + let mut attrs = HashMap::new(); + attrs.insert( + "kind".into(), + match status { + BpTransferStatus::Relayed => "bridge_pool_relayed", + BpTransferStatus::Expired => "bridge_pool_expired", + } + .into(), + ); + attrs.insert("tx_hash".into(), tx_hash.to_string()); + attrs + }, + }, + } + } +} + +impl Index<&str> for Event { + type Output = String; + + fn index(&self, index: &str) -> &Self::Output { + &self.attributes[index] + } +} + +impl IndexMut<&str> for Event { + fn index_mut(&mut self, index: &str) -> &mut Self::Output { + if !self.attributes.contains_key(index) { + self.attributes.insert(String::from(index), String::new()); + } + self.attributes.get_mut(index).unwrap() + } +} + +impl From for Event { + fn from(ibc_event: IbcEvent) -> Self { + Self { + event_type: 
EventType::Ibc(ibc_event.event_type), + level: EventLevel::Tx, + attributes: ibc_event.attributes, + } + } +} + +/// Convert our custom event into the necessary tendermint proto type +impl From for crate::tendermint_proto::v0_37::abci::Event { + fn from(event: Event) -> Self { + Self { + r#type: event.event_type.to_string(), + attributes: event + .attributes + .into_iter() + .map(|(key, value)| { + crate::tendermint_proto::v0_37::abci::EventAttribute { + key, + value, + index: true, + } + }) + .collect(), + } + } +} diff --git a/crates/core/src/types/hash.rs b/crates/core/src/hash.rs similarity index 100% rename from crates/core/src/types/hash.rs rename to crates/core/src/hash.rs diff --git a/crates/core/src/types/ibc.rs b/crates/core/src/ibc.rs similarity index 97% rename from crates/core/src/types/ibc.rs rename to crates/core/src/ibc.rs index f517c84363..d3d5995b09 100644 --- a/crates/core/src/types/ibc.rs +++ b/crates/core/src/ibc.rs @@ -7,6 +7,7 @@ use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use borsh_ext::BorshSerializeExt; use data_encoding::{DecodePartial, HEXLOWER, HEXLOWER_PERMISSIVE, HEXUPPER}; +pub use ibc::*; use serde::{Deserialize, Serialize}; use thiserror::Error; @@ -17,9 +18,9 @@ use crate::ibc::core::handler::types::events::{ Error as IbcEventError, IbcEvent as RawIbcEvent, }; use crate::ibc::primitives::proto::Protobuf; +use crate::masp::PaymentAddress; use crate::tendermint::abci::Event as AbciEvent; -use crate::types::masp::PaymentAddress; -use crate::types::token::Transfer; +use crate::token::Transfer; /// The event type defined in ibc-rs for receiving a token pub const EVENT_TYPE_PACKET: &str = "fungible_token_packet"; @@ -82,7 +83,7 @@ pub struct IbcEvent { impl std::cmp::PartialOrd for IbcEvent { fn partial_cmp(&self, other: &Self) -> Option { - self.event_type.partial_cmp(&other.event_type) + Some(self.cmp(other)) } } @@ -166,7 +167,7 @@ pub enum Error { } /// Conversion functions result -pub type 
Result = std::result::Result; +type Result = std::result::Result; impl TryFrom for IbcEvent { type Error = Error; diff --git a/crates/core/src/types/internal.rs b/crates/core/src/internal.rs similarity index 100% rename from crates/core/src/types/internal.rs rename to crates/core/src/internal.rs diff --git a/crates/core/src/types/keccak.rs b/crates/core/src/keccak.rs similarity index 97% rename from crates/core/src/types/keccak.rs rename to crates/core/src/keccak.rs index f6fc15724d..ba73bff5b1 100644 --- a/crates/core/src/types/keccak.rs +++ b/crates/core/src/keccak.rs @@ -1,7 +1,6 @@ //! This module is for hashing Namada types using the keccak256 //! hash function in a way that is compatible with smart contracts //! on Ethereum. -use std::convert::{TryFrom, TryInto}; use std::fmt; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; @@ -11,8 +10,8 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use thiserror::Error; pub use tiny_keccak::{Hasher, Keccak}; -use crate::types::eth_abi::Encode; -use crate::types::hash::{Hash, HASH_LENGTH}; +use crate::eth_abi::Encode; +use crate::hash::{Hash, HASH_LENGTH}; /// Errors for converting / parsing Keccak hashes #[allow(missing_docs)] diff --git a/crates/core/src/types/key/common.rs b/crates/core/src/key/common.rs similarity index 97% rename from crates/core/src/types/key/common.rs rename to crates/core/src/key/common.rs index 7db4641879..aa58c8700b 100644 --- a/crates/core/src/types/key/common.rs +++ b/crates/core/src/key/common.rs @@ -1,6 +1,4 @@ //! 
Cryptographic keys - -use std::convert::TryFrom; use std::fmt::Display; use std::str::FromStr; @@ -17,10 +15,9 @@ use super::{ ParseSignatureError, RefTo, SchemeType, SigScheme as SigSchemeTrait, VerifySigError, }; -use crate::impl_display_and_from_str_via_format; -use crate::types::ethereum_events::EthAddress; -use crate::types::key::{SignableBytes, StorageHasher}; -use crate::types::string_encoding; +use crate::ethereum_events::EthAddress; +use crate::key::{SignableBytes, StorageHasher}; +use crate::{impl_display_and_from_str_via_format, string_encoding}; /// Public key #[derive( @@ -456,14 +453,13 @@ impl super::SigScheme for SigScheme { #[cfg(test)] mod tests { use super::*; - use crate::types::key::ed25519; /// Run `cargo test gen_ed25519_keypair -- --nocapture` to generate a /// new ed25519 keypair wrapped in `common` key types. #[test] fn gen_ed25519_keypair() { let secret_key = - SecretKey::Ed25519(crate::types::key::testing::gen_keypair::< + SecretKey::Ed25519(crate::key::testing::gen_keypair::< ed25519::SigScheme, >()); let public_key = secret_key.to_public(); diff --git a/crates/core/src/types/key/ed25519.rs b/crates/core/src/key/ed25519.rs similarity index 98% rename from crates/core/src/types/key/ed25519.rs rename to crates/core/src/key/ed25519.rs index 2e118fe90e..a2b00ace80 100644 --- a/crates/core/src/types/key/ed25519.rs +++ b/crates/core/src/key/ed25519.rs @@ -19,7 +19,7 @@ use super::{ ParsePublicKeyError, ParseSecretKeyError, ParseSignatureError, RefTo, SchemeType, SigScheme as SigSchemeTrait, SignableBytes, VerifySigError, }; -use crate::types::key::StorageHasher; +use crate::key::StorageHasher; const PUBLIC_KEY_LENGTH: usize = 32; const SECRET_KEY_LENGTH: usize = 32; @@ -105,7 +105,7 @@ impl Hash for PublicKey { impl PartialOrd for PublicKey { fn partial_cmp(&self, other: &Self) -> Option { - self.0.to_bytes().partial_cmp(&other.0.to_bytes()) + Some(self.cmp(other)) } } @@ -316,7 +316,7 @@ impl Hash for Signature { impl PartialOrd for 
Signature { fn partial_cmp(&self, other: &Self) -> Option { - self.0.to_bytes().partial_cmp(&other.0.to_bytes()) + Some(self.cmp(other)) } } diff --git a/crates/core/src/types/key/mod.rs b/crates/core/src/key/mod.rs similarity index 97% rename from crates/core/src/types/key/mod.rs rename to crates/core/src/key/mod.rs index b07efc0ba4..dee77713f4 100644 --- a/crates/core/src/types/key/mod.rs +++ b/crates/core/src/key/mod.rs @@ -17,9 +17,9 @@ use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use thiserror::Error; -use crate::types::address; -use crate::types::hash::{KeccakHasher, Sha256Hasher, StorageHasher}; -use crate::types::keccak::{keccak_hash, KeccakHash}; +use crate::address; +use crate::hash::{KeccakHasher, Sha256Hasher, StorageHasher}; +use crate::keccak::{keccak_hash, KeccakHash}; /// Represents an error in signature verification #[allow(missing_docs)] @@ -458,25 +458,25 @@ impl SignableBytes for &[u8] {} impl SignableBytes for [u8; N] {} impl SignableBytes for &[u8; N] {} -impl SignableBytes for crate::types::hash::Hash { +impl SignableBytes for crate::hash::Hash { fn signable_hash(&self) -> [u8; 32] { self.0 } } -impl SignableBytes for &crate::types::hash::Hash { +impl SignableBytes for &crate::hash::Hash { fn signable_hash(&self) -> [u8; 32] { self.0 } } -impl SignableBytes for crate::types::keccak::KeccakHash { +impl SignableBytes for crate::keccak::KeccakHash { fn signable_hash(&self) -> [u8; 32] { self.0 } } -impl SignableBytes for &crate::types::keccak::KeccakHash { +impl SignableBytes for &crate::keccak::KeccakHash { fn signable_hash(&self) -> [u8; 32] { self.0 } @@ -485,13 +485,11 @@ impl SignableBytes for &crate::types::keccak::KeccakHash { /// Helpers for testing with keys. 
#[cfg(any(test, feature = "testing"))] pub mod testing { - use borsh::BorshDeserialize; use proptest::prelude::*; use rand::prelude::{StdRng, ThreadRng}; use rand::{thread_rng, SeedableRng}; - use super::SigScheme; - use crate::types::key::*; + use super::*; /// Generate an arbitrary public key pub fn arb_pk() diff --git a/crates/core/src/types/key/secp256k1.rs b/crates/core/src/key/secp256k1.rs similarity index 98% rename from crates/core/src/types/key/secp256k1.rs rename to crates/core/src/key/secp256k1.rs index 06ae2c4216..cacef20680 100644 --- a/crates/core/src/types/key/secp256k1.rs +++ b/crates/core/src/key/secp256k1.rs @@ -24,9 +24,9 @@ use super::{ ParsePublicKeyError, ParseSecretKeyError, ParseSignatureError, RefTo, SchemeType, SigScheme as SigSchemeTrait, SignableBytes, VerifySigError, }; -use crate::types::eth_abi::Encode; -use crate::types::ethereum_events::EthAddress; -use crate::types::key::StorageHasher; +use crate::eth_abi::Encode; +use crate::ethereum_events::EthAddress; +use crate::key::StorageHasher; /// The provided constant is for a traditional /// signature on this curve. 
For Ethereum, an extra byte is included @@ -118,7 +118,7 @@ impl Hash for PublicKey { impl PartialOrd for PublicKey { fn partial_cmp(&self, other: &Self) -> Option { - self.0.to_sec1_bytes().partial_cmp(&other.0.to_sec1_bytes()) + Some(self.cmp(other)) } } @@ -490,16 +490,16 @@ impl Hash for Signature { impl PartialOrd for Signature { fn partial_cmp(&self, other: &Self) -> Option { - match self.0.to_bytes().partial_cmp(&other.0.to_bytes()) { - Some(Ordering::Equal) => self.1.partial_cmp(&other.1), - res => res, - } + Some(self.cmp(other)) } } impl Ord for Signature { fn cmp(&self, other: &Self) -> Ordering { - self.partial_cmp(other).unwrap() + match self.0.to_bytes().cmp(&other.0.to_bytes()) { + Ordering::Equal => self.1.cmp(&other.1), + res => res, + } } } @@ -601,7 +601,6 @@ impl super::SigScheme for SigScheme { #[cfg(test)] mod test { - use k256::elliptic_curve::sec1::ToEncodedPoint; use super::*; diff --git a/crates/core/src/ledger/eth_bridge/mod.rs b/crates/core/src/ledger/eth_bridge/mod.rs deleted file mode 100644 index fda8964ff6..0000000000 --- a/crates/core/src/ledger/eth_bridge/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! Ethereum bridge account - -use crate::types::address::{Address, InternalAddress}; - -/// The [`InternalAddress`] of the Ethereum bridge account -pub const INTERNAL_ADDRESS: InternalAddress = InternalAddress::EthBridge; - -/// The [`Address`] of the Ethereum bridge account -pub const ADDRESS: Address = Address::Internal(INTERNAL_ADDRESS); diff --git a/crates/core/src/ledger/mod.rs b/crates/core/src/ledger/mod.rs deleted file mode 100644 index 70c1773bfb..0000000000 --- a/crates/core/src/ledger/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! 
The ledger modules - -pub mod eth_bridge; -pub mod inflation; -pub mod replay_protection; diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index c0d837517b..667aae67aa 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -7,13 +7,64 @@ #![deny(rustdoc::private_intra_doc_links)] pub mod bytes; +pub mod event; pub mod hints; -pub mod ledger; -pub mod types; -pub use {ibc, tendermint, tendermint_proto}; +pub use {masp_primitives, tendermint, tendermint_proto}; /// Borsh binary encoding (re-exported) from official crate with custom ext. pub mod borsh { pub use borsh::*; pub use borsh_ext::*; } + +pub mod account; +pub mod address; +pub mod chain; +pub mod dec; +pub mod eth_abi; +pub mod eth_bridge_pool; +pub mod ethereum_events; +pub mod ethereum_structs; +pub mod hash; +pub mod ibc; +pub mod internal; +pub mod keccak; +pub mod key; +pub mod masp; +pub mod parameters; +pub mod sign; +pub mod storage; +pub mod string_encoding; +pub mod time; +pub mod token; +pub mod uint; +pub mod validity_predicate; +pub mod voting_power; + +use borsh_ext::BorshSerializeExt; +use thiserror::Error; + +use crate::borsh::{BorshDeserialize, BorshSerialize}; + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum DecodeError { + #[error("Deserialization error: {0}")] + DeserializationError(std::io::Error), +} + +/// Encode a value with borsh +pub fn encode(value: &T) -> Vec +where + T: BorshSerialize, +{ + value.serialize_to_vec() +} + +/// Decode a value with borsh +pub fn decode(bytes: impl AsRef<[u8]>) -> Result +where + T: BorshDeserialize, +{ + T::try_from_slice(bytes.as_ref()).map_err(DecodeError::DeserializationError) +} diff --git a/crates/core/src/types/masp.rs b/crates/core/src/masp.rs similarity index 98% rename from crates/core/src/types/masp.rs rename to crates/core/src/masp.rs index 64b18eb6b9..ad37985461 100644 --- a/crates/core/src/types/masp.rs +++ b/crates/core/src/masp.rs @@ -9,14 +9,14 @@ use masp_primitives::asset_type::AssetType; use 
serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; +use crate::address::{Address, DecodeError, HASH_HEX_LEN, MASP}; use crate::impl_display_and_from_str_via_format; -use crate::types::address::{Address, DecodeError, HASH_HEX_LEN, MASP}; -use crate::types::storage::Epoch; -use crate::types::string_encoding::{ +use crate::storage::Epoch; +use crate::string_encoding::{ self, MASP_EXT_FULL_VIEWING_KEY_HRP, MASP_EXT_SPENDING_KEY_HRP, MASP_PAYMENT_ADDRESS_HRP, }; -use crate::types::token::{Denomination, MaspDigitPos}; +use crate::token::{Denomination, MaspDigitPos}; /// The plain representation of a MASP aaset #[derive( diff --git a/crates/core/src/types/mod.rs b/crates/core/src/mod.rs similarity index 100% rename from crates/core/src/types/mod.rs rename to crates/core/src/mod.rs diff --git a/crates/core/src/types/parameters.rs b/crates/core/src/parameters.rs similarity index 100% rename from crates/core/src/types/parameters.rs rename to crates/core/src/parameters.rs diff --git a/crates/core/src/types/sign.rs b/crates/core/src/sign.rs similarity index 100% rename from crates/core/src/types/sign.rs rename to crates/core/src/sign.rs diff --git a/crates/core/src/types/storage.rs b/crates/core/src/storage.rs similarity index 98% rename from crates/core/src/types/storage.rs rename to crates/core/src/storage.rs index da1ed2920e..dddb367f7c 100644 --- a/crates/core/src/types/storage.rs +++ b/crates/core/src/storage.rs @@ -1,10 +1,9 @@ //! 
Storage types use std::collections::VecDeque; -use std::convert::{TryFrom, TryInto}; use std::fmt::Display; use std::io::{Read, Write}; use std::num::ParseIntError; -use std::ops::{Add, AddAssign, Deref, Div, Drop, Mul, Rem, Sub}; +use std::ops::{Add, AddAssign, Deref, Div, Mul, Rem, Sub}; use std::str::FromStr; use arse_merkle_tree::InternalKey; @@ -16,13 +15,13 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use super::key::common; +use crate::address::{self, Address}; use crate::bytes::ByteBuf; +use crate::ethereum_events::{GetEventNonce, TransfersToNamada, Uint}; +use crate::hash::Hash; use crate::hints; -use crate::types::address::{self, Address}; -use crate::types::ethereum_events::{GetEventNonce, TransfersToNamada, Uint}; -use crate::types::hash::Hash; -use crate::types::keccak::{KeccakHash, TryFromError}; -use crate::types::time::DateTimeUtc; +use crate::keccak::{KeccakHash, TryFromError}; +use crate::time::DateTimeUtc; /// The maximum size of an IBC key (in bytes) allowed in merkle-ized storage pub const IBC_KEY_LIMIT: usize = 240; @@ -900,9 +899,8 @@ impl KeySeg for Address { impl KeySeg for Hash { fn parse(seg: String) -> Result { - seg.try_into().map_err(|e: crate::types::hash::Error| { - Error::ParseKeySeg(e.to_string()) - }) + seg.try_into() + .map_err(|e: crate::hash::Error| Error::ParseKeySeg(e.to_string())) } fn raw(&self) -> String { @@ -1478,7 +1476,7 @@ pub mod tests { use proptest::prelude::*; use super::*; - use crate::types::address::testing::arb_address; + use crate::address::testing::arb_address; proptest! { /// Tests that any key that doesn't contain reserved prefixes is valid. @@ -1907,9 +1905,7 @@ pub mod testing { use proptest::prelude::*; use super::*; - use crate::types::address::testing::{ - arb_address, arb_non_internal_address, - }; + use crate::address::testing::{arb_address, arb_non_internal_address}; prop_compose! 
{ /// Generate an arbitrary epoch diff --git a/crates/core/src/types/string_encoding.rs b/crates/core/src/string_encoding.rs similarity index 94% rename from crates/core/src/types/string_encoding.rs rename to crates/core/src/string_encoding.rs index 47db158d85..e1109ab0d2 100644 --- a/crates/core/src/types/string_encoding.rs +++ b/crates/core/src/string_encoding.rs @@ -11,7 +11,7 @@ use std::fmt::Display; use std::ops::Deref; use std::str::FromStr; -use bech32::{self, FromBase32, ToBase32, Variant}; +use bech32::{FromBase32, ToBase32, Variant}; use borsh::{BorshDeserialize, BorshSerialize}; use serde::{Deserialize, Serialize}; use thiserror::Error; @@ -112,19 +112,15 @@ macro_rules! impl_display_and_from_str_via_format { ($t:path) => { impl std::fmt::Display for $t { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{}", - $crate::types::string_encoding::Format::encode(self) - ) + write!(f, "{}", $crate::string_encoding::Format::encode(self)) } } impl std::str::FromStr for $t { - type Err = $crate::types::string_encoding::DecodeError; + type Err = $crate::string_encoding::DecodeError; fn from_str(s: &str) -> std::result::Result { - $crate::types::string_encoding::Format::decode(s) + $crate::string_encoding::Format::decode(s) } } }; diff --git a/crates/core/src/types/time.rs b/crates/core/src/time.rs similarity index 99% rename from crates/core/src/types/time.rs rename to crates/core/src/time.rs index 150096c691..3a5e1d617f 100644 --- a/crates/core/src/types/time.rs +++ b/crates/core/src/time.rs @@ -1,7 +1,6 @@ //! Types for dealing with time and durations. 
use std::collections::BTreeMap; -use std::convert::{TryFrom, TryInto}; use std::fmt::Display; use std::io::Read; use std::ops::{Add, Sub}; diff --git a/crates/core/src/types/token.rs b/crates/core/src/token.rs similarity index 93% rename from crates/core/src/types/token.rs rename to crates/core/src/token.rs index 02370b9854..5f6c9c6c2a 100644 --- a/crates/core/src/types/token.rs +++ b/crates/core/src/token.rs @@ -1,7 +1,6 @@ //! A basic fungible token use std::cmp::Ordering; -use std::collections::BTreeMap; use std::fmt::Display; use std::iter::Sum; use std::ops::{Add, AddAssign, Div, Mul, Sub, SubAssign}; @@ -10,42 +9,16 @@ use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use data_encoding::BASE32HEX_NOPAD; use ethabi::ethereum_types::U256; -use masp_primitives::asset_type::AssetType; -use masp_primitives::convert::AllowedConversion; -use masp_primitives::merkle_tree::FrozenCommitmentTree; -use masp_primitives::sapling; use serde::{Deserialize, Serialize}; use thiserror::Error; +use crate::address::Address; +use crate::dec::{Dec, POS_DECIMAL_PRECISION}; +use crate::hash::Hash; use crate::ibc::apps::transfer::types::Amount as IbcAmount; -use crate::types::address::Address; -use crate::types::dec::{Dec, POS_DECIMAL_PRECISION}; -use crate::types::hash::Hash; -use crate::types::storage; -use crate::types::storage::{DbKeySeg, Epoch, KeySeg}; -use crate::types::uint::{self, Uint, I256}; - -/// A representation of the conversion state -#[derive(Debug, Default, BorshSerialize, BorshDeserialize)] -pub struct ConversionState { - /// The last amount of the native token distributed - pub normed_inflation: Option, - /// The tree currently containing all the conversions - pub tree: FrozenCommitmentTree, - /// A map from token alias to actual address. 
- pub tokens: BTreeMap, - /// Map assets to their latest conversion and position in Merkle tree - #[allow(clippy::type_complexity)] - pub assets: BTreeMap< - AssetType, - ( - (Address, Denomination, MaspDigitPos), - Epoch, - AllowedConversion, - usize, - ), - >, -} +use crate::storage; +use crate::storage::{DbKeySeg, KeySeg}; +use crate::uint::{self, Uint, I256}; /// Amount in micro units. For different granularity another representation /// might be more appropriate. @@ -572,6 +545,12 @@ impl FromStr for DenominatedAmount { impl PartialOrd for DenominatedAmount { fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for DenominatedAmount { + fn cmp(&self, other: &Self) -> Ordering { if self.denom < other.denom { let diff = other.denom.0 - self.denom.0; let (div, rem) = @@ -581,12 +560,12 @@ impl PartialOrd for DenominatedAmount { } else { div + Uint::one() }; - let ord = self.amount.raw.partial_cmp(&div_ceil); - if let Some(Ordering::Equal) = ord { + let ord = self.amount.raw.cmp(&div_ceil); + if let Ordering::Equal = ord { if rem.is_zero() { - Some(Ordering::Equal) + Ordering::Equal } else { - Some(Ordering::Greater) + Ordering::Greater } } else { ord @@ -600,12 +579,12 @@ impl PartialOrd for DenominatedAmount { } else { div + Uint::one() }; - let ord = div_ceil.partial_cmp(&other.amount.raw); - if let Some(Ordering::Equal) = ord { + let ord = div_ceil.cmp(&other.amount.raw); + if let Ordering::Equal = ord { if rem.is_zero() { - Some(Ordering::Equal) + Ordering::Equal } else { - Some(Ordering::Less) + Ordering::Less } } else { ord @@ -614,12 +593,6 @@ impl PartialOrd for DenominatedAmount { } } -impl Ord for DenominatedAmount { - fn cmp(&self, other: &Self) -> Ordering { - self.partial_cmp(other).unwrap() - } -} - impl serde::Serialize for Amount { fn serialize( &self, @@ -1002,44 +975,6 @@ impl From for IbcAmount { } } -/// Token parameters for each kind of asset held on chain -#[derive( - Clone, - Debug, - PartialEq, - Eq, - 
PartialOrd, - Ord, - Hash, - BorshSerialize, - BorshDeserialize, - BorshSchema, - Deserialize, - Serialize, -)] -pub struct MaspParams { - /// Maximum reward rate - pub max_reward_rate: Dec, - /// Shielded Pool nominal derivative gain - pub kd_gain_nom: Dec, - /// Shielded Pool nominal proportional gain for the given token - pub kp_gain_nom: Dec, - /// Target amount for the given token that is locked in the shielded pool - /// TODO: should this be a Uint or DenominatedAmount??? - pub locked_amount_target: u64, -} - -impl Default for MaspParams { - fn default() -> Self { - Self { - max_reward_rate: Dec::from_str("0.1").unwrap(), - kp_gain_nom: Dec::from_str("0.25").unwrap(), - kd_gain_nom: Dec::from_str("0.25").unwrap(), - locked_amount_target: 10_000_u64, - } - } -} - /// A simple bilateral token transfer #[derive( Debug, @@ -1085,7 +1020,7 @@ pub mod testing { use proptest::prelude::*; use super::*; - use crate::types::address::testing::{ + use crate::address::testing::{ arb_established_address, arb_non_internal_address, }; diff --git a/crates/core/src/types/uint.rs b/crates/core/src/uint.rs similarity index 98% rename from crates/core/src/types/uint.rs rename to crates/core/src/uint.rs index e13cb9ce1b..63d732cf7c 100644 --- a/crates/core/src/types/uint.rs +++ b/crates/core/src/uint.rs @@ -12,8 +12,8 @@ use num_traits::{CheckedAdd, CheckedMul, CheckedSub}; use uint::construct_uint; use super::dec::{Dec, POS_DECIMAL_PRECISION}; -use crate::types::token; -use crate::types::token::{Amount, AmountParseError, MaspDigitPos}; +use crate::token; +use crate::token::{Amount, AmountParseError, MaspDigitPos}; /// The value zero. 
pub const ZERO: Uint = Uint::from_u64(0); @@ -636,29 +636,29 @@ impl Neg for I256 { impl PartialOrd for I256 { fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for I256 { + fn cmp(&self, other: &Self) -> Ordering { match (self.non_negative(), other.non_negative()) { - (true, false) => Some(Ordering::Greater), - (false, true) => Some(Ordering::Less), + (true, false) => Ordering::Greater, + (false, true) => Ordering::Less, (true, true) => { let this = self.abs(); let that = other.abs(); - this.partial_cmp(&that) + this.cmp(&that) } (false, false) => { let this = self.abs(); let that = other.abs(); - that.partial_cmp(&this) + that.cmp(&this) } } } } -impl Ord for I256 { - fn cmp(&self, other: &Self) -> Ordering { - self.partial_cmp(other).unwrap() - } -} - impl Add for I256 { type Output = Self; diff --git a/crates/core/src/types/validity_predicate.rs b/crates/core/src/validity_predicate.rs similarity index 100% rename from crates/core/src/types/validity_predicate.rs rename to crates/core/src/validity_predicate.rs diff --git a/crates/core/src/types/voting_power.rs b/crates/core/src/voting_power.rs similarity index 99% rename from crates/core/src/types/voting_power.rs rename to crates/core/src/voting_power.rs index 014e0bade9..b4164b58f0 100644 --- a/crates/core/src/types/voting_power.rs +++ b/crates/core/src/voting_power.rs @@ -14,8 +14,8 @@ use num_traits::ops::checked::CheckedAdd; use serde::de::Visitor; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -use crate::types::token::Amount; -use crate::types::uint::Uint; +use crate::token::Amount; +use crate::uint::Uint; /// Namada voting power, normalized to the range `0 - 2^32`. 
#[derive( diff --git a/crates/encoding_spec/src/main.rs b/crates/encoding_spec/src/main.rs index d6bb9df06c..70bb8db141 100644 --- a/crates/encoding_spec/src/main.rs +++ b/crates/encoding_spec/src/main.rs @@ -17,7 +17,6 @@ use std::collections::{BTreeMap, HashSet}; use std::io::Write; -use std::iter::Extend; use borsh::schema::{BorshSchemaContainer, Declaration, Definition}; use borsh::{schema, schema_container_of}; @@ -25,12 +24,12 @@ use itertools::Itertools; use lazy_static::lazy_static; use madato::types::TableRow; use namada::account; +use namada::core::address::Address; +use namada::core::key::ed25519::{PublicKey, Signature}; +use namada::core::storage::{self, Epoch}; +use namada::core::token; use namada::ledger::parameters::Parameters; use namada::tx::data::{pos, TxType, WrapperTx}; -use namada::types::address::Address; -use namada::types::key::ed25519::{PublicKey, Signature}; -use namada::types::storage::{self, Epoch}; -use namada::types::token; /// This generator will write output into this `docs` file. 
const OUTPUT_PATH: &str = @@ -288,10 +287,10 @@ fn definition_to_table(name: &Declaration, def: schema::Definition) -> Table { (desc, rows) } schema::Definition::Sequence { - length_width, + length_width: 0, length_range, elements, - } if length_width == 0 => { + } => { let rows = None; let desc = format!( "Fixed-size array with {} elements of {}", @@ -330,7 +329,7 @@ fn definition_to_table(name: &Declaration, def: schema::Definition) -> Table { } => { let mut rows = madato::types::Table::default(); // build rows for: Variant, Name, Type - for (_, (variant, name, type_name)) in variants.iter().enumerate() { + for (variant, name, type_name) in variants.iter() { rows.push(TableRow::from_iter([ ("Prefix byte".into(), variant.to_string()), ("Name".into(), name.clone()), diff --git a/crates/ethereum_bridge/src/lib.rs b/crates/ethereum_bridge/src/lib.rs index 31d9fc1e62..eef7126bab 100644 --- a/crates/ethereum_bridge/src/lib.rs +++ b/crates/ethereum_bridge/src/lib.rs @@ -6,5 +6,5 @@ pub mod storage; #[cfg(any(test, feature = "testing"))] pub mod test_utils; -pub use namada_core::ledger::eth_bridge::ADDRESS; +pub use namada_core::address::ETH_BRIDGE as ADDRESS; pub use namada_trans_token as token; diff --git a/crates/ethereum_bridge/src/oracle/config.rs b/crates/ethereum_bridge/src/oracle/config.rs index 33b66a34d7..37e5a94cf9 100644 --- a/crates/ethereum_bridge/src/oracle/config.rs +++ b/crates/ethereum_bridge/src/oracle/config.rs @@ -1,8 +1,8 @@ //! Configuration for an oracle. use std::num::NonZeroU64; -use namada_core::types::ethereum_events::EthAddress; -use namada_core::types::ethereum_structs; +use namada_core::ethereum_events::EthAddress; +use namada_core::ethereum_structs; /// Configuration for an oracle. 
#[derive(Clone, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)] diff --git a/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs b/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs index afd307fb87..088806bffa 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs @@ -1,13 +1,13 @@ use std::collections::{HashMap, HashSet}; use eyre::Result; -use namada_core::types::address::Address; -use namada_core::types::keccak::keccak_hash; -use namada_core::types::key::{common, SignableEthMessage}; -use namada_core::types::storage::BlockHeight; -use namada_core::types::token::Amount; +use namada_core::address::Address; +use namada_core::keccak::keccak_hash; +use namada_core::key::{common, SignableEthMessage}; +use namada_core::storage::BlockHeight; +use namada_core::token::Amount; use namada_proof_of_stake::pos_queries::PosQueries; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_storage::{StorageRead, StorageWrite}; use namada_tx::data::TxResult; use namada_tx::Signed; @@ -25,7 +25,7 @@ use crate::storage::vote_tallies::{self, BridgePoolRoot}; /// Sign the latest Bridge pool root, and return the associated /// vote extension protocol transaction. 
pub fn sign_bridge_pool_root( - wl_storage: &WlStorage, + state: &WlState, validator_addr: &Address, eth_hot_key: &common::SecretKey, protocol_key: &common::SecretKey, @@ -34,18 +34,15 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - if !wl_storage.ethbridge_queries().is_bridge_active() { + if !state.ethbridge_queries().is_bridge_active() { return None; } - let bp_root = wl_storage.ethbridge_queries().get_bridge_pool_root().0; - let nonce = wl_storage - .ethbridge_queries() - .get_bridge_pool_nonce() - .to_bytes(); + let bp_root = state.ethbridge_queries().get_bridge_pool_root().0; + let nonce = state.ethbridge_queries().get_bridge_pool_nonce().to_bytes(); let to_sign = keccak_hash([bp_root.as_slice(), nonce.as_slice()].concat()); let signed = Signed::<_, SignableEthMessage>::new(eth_hot_key, to_sign); let ext = bridge_pool_roots::Vext { - block_height: wl_storage.storage.get_last_block_height(), + block_height: state.in_mem().get_last_block_height(), validator_addr: validator_addr.clone(), sig: signed.sig, }; @@ -61,7 +58,7 @@ where /// validators, the signature is made available for bridge /// pool proofs. pub fn apply_derived_tx( - wl_storage: &mut WlStorage, + state: &mut WlState, vext: MultiSignedVext, ) -> Result where @@ -76,14 +73,14 @@ where "Applying state updates derived from signatures of the Ethereum \ bridge pool root and nonce." 
); - let voting_powers = utils::get_voting_powers(wl_storage, &vext)?; + let voting_powers = utils::get_voting_powers(state, &vext)?; let root_height = vext.iter().next().unwrap().data.block_height; - let (partial_proof, seen_by) = parse_vexts(wl_storage, vext); + let (partial_proof, seen_by) = parse_vexts(state, vext); // return immediately if a complete proof has already been acquired let bp_key = vote_tallies::Keys::from((&partial_proof, root_height)); let seen = - votes::storage::maybe_read_seen(wl_storage, &bp_key)?.unwrap_or(false); + votes::storage::maybe_read_seen(state, &bp_key)?.unwrap_or(false); if seen { tracing::debug!( ?root_height, @@ -94,19 +91,14 @@ where } // apply updates to the bridge pool root. - let (mut changed, confirmed_update) = apply_update( - wl_storage, - bp_key, - partial_proof, - seen_by, - &voting_powers, - )?; + let (mut changed, confirmed_update) = + apply_update(state, bp_key, partial_proof, seen_by, &voting_powers)?; // if the root is confirmed, update storage and add // relevant key to changed. if let Some(proof) = confirmed_update { let signed_root_key = get_signed_root_key(); - let should_write_root = wl_storage + let should_write_root = state .read::<(BridgePoolRoot, BlockHeight)>(&signed_root_key) .expect( "Reading a signed Bridge pool root from storage should not \ @@ -127,12 +119,9 @@ where ?root_height, "New Bridge pool root proof acquired" ); - wl_storage - .write(&signed_root_key, (proof, root_height)) - .expect( - "Writing a signed Bridge pool root to storage should not \ - fail.", - ); + state.write(&signed_root_key, (proof, root_height)).expect( + "Writing a signed Bridge pool root to storage should not fail.", + ); changed.insert(get_signed_root_key()); } else { tracing::debug!( @@ -161,7 +150,7 @@ impl GetVoters for &MultiSignedVext { /// Convert a set of signatures over bridge pool roots and nonces (at a certain /// height) into a partial proof and a new set of votes. 
fn parse_vexts( - wl_storage: &WlStorage, + state: &WlState, multisigned: MultiSignedVext, ) -> (BridgePoolRoot, Votes) where @@ -169,19 +158,19 @@ where H: 'static + StorageHasher + Sync, { let height = multisigned.iter().next().unwrap().data.block_height; - let epoch = wl_storage.pos_queries().get_epoch(height); - let root = wl_storage + let epoch = state.pos_queries().get_epoch(height); + let root = state .ethbridge_queries() .get_bridge_pool_root_at_height(height) .expect("A BP root should be available at the given height"); - let nonce = wl_storage + let nonce = state .ethbridge_queries() .get_bridge_pool_nonce_at_height(height); let mut partial_proof = BridgePoolRootProof::new((root, nonce)); partial_proof.attach_signature_batch(multisigned.clone().into_iter().map( |SignedVext(signed)| { ( - wl_storage + state .ethbridge_queries() .get_eth_addr_book(&signed.data.validator_addr, epoch) .unwrap(), @@ -206,7 +195,7 @@ where /// /// In all instances, the changed storage keys are returned. 
fn apply_update( - wl_storage: &mut WlStorage, + state: &mut WlState, bp_key: vote_tallies::Keys, mut update: BridgePoolRoot, seen_by: Votes, @@ -216,7 +205,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let partial_proof = votes::storage::read_body(wl_storage, &bp_key); + let partial_proof = votes::storage::read_body(state, &bp_key); let (vote_tracking, changed, confirmed, already_present) = if let Ok( partial, ) = @@ -229,7 +218,7 @@ where update.0.attach_signature_batch(partial.0.signatures); let new_votes = NewVotes::new(seen_by, voting_powers)?; let (vote_tracking, changed) = - votes::update::calculate(wl_storage, &bp_key, new_votes)?; + votes::update::calculate(state, &bp_key, new_votes)?; if changed.is_empty() { return Ok((changed, None)); } @@ -237,14 +226,14 @@ where (vote_tracking, changed, confirmed, true) } else { tracing::debug!(%bp_key.prefix, "No validator has signed this bridge pool update before."); - let vote_tracking = calculate_new(wl_storage, seen_by, voting_powers)?; + let vote_tracking = calculate_new(state, seen_by, voting_powers)?; let changed = bp_key.into_iter().collect(); let confirmed = vote_tracking.seen; (vote_tracking, changed, confirmed, false) }; votes::storage::write( - wl_storage, + state, &bp_key, &update, &vote_tracking, @@ -259,16 +248,14 @@ mod test_apply_bp_roots_to_storage { use assert_matches::assert_matches; use borsh::BorshDeserialize; - use namada_core::types::address; - use namada_core::types::ethereum_events::Uint; - use namada_core::types::keccak::{keccak_hash, KeccakHash}; - use namada_core::types::storage::Key; - use namada_core::types::voting_power::FractionalVotingPower; + use namada_core::address; + use namada_core::ethereum_events::Uint; + use namada_core::keccak::KeccakHash; + use namada_core::storage::Key; + use namada_core::voting_power::FractionalVotingPower; use namada_proof_of_stake::parameters::OwnedPosParams; use 
namada_proof_of_stake::storage::write_pos_params; - use namada_state::testing::TestWlStorage; - use namada_storage::StorageRead; - use namada_vote_ext::bridge_pool_roots; + use namada_state::testing::TestState; use super::*; use crate::protocol::transactions::votes::{ @@ -285,7 +272,7 @@ mod test_apply_bp_roots_to_storage { /// The validator keys. keys: HashMap, /// Storage. - wl_storage: TestWlStorage, + state: TestState, } /// Setup storage for tests. @@ -297,7 +284,7 @@ mod test_apply_bp_roots_to_storage { let validator_a = address::testing::established_address_2(); let validator_b = address::testing::established_address_3(); let validator_c = address::testing::established_address_4(); - let (mut wl_storage, keys) = test_utils::setup_storage_with_validators( + let (mut state, keys) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ (validator_a.clone(), Amount::native_whole(100)), (validator_b.clone(), Amount::native_whole(100)), @@ -305,30 +292,31 @@ mod test_apply_bp_roots_to_storage { ]), ); // First commit - wl_storage.storage.block.height = 1.into(); - wl_storage.commit_block().unwrap(); + state.in_mem_mut().block.height = 1.into(); + state.commit_block().unwrap(); - vp::bridge_pool::init_storage(&mut wl_storage); + vp::bridge_pool::init_storage(&mut state); test_utils::commit_bridge_pool_root_at_height( - &mut wl_storage, + &mut state, &KeccakHash([1; 32]), 99.into(), ); test_utils::commit_bridge_pool_root_at_height( - &mut wl_storage, + &mut state, &KeccakHash([1; 32]), 100.into(), ); - wl_storage + state .write(&get_key_from_hash(&KeccakHash([1; 32])), BlockHeight(101)) .expect("Test failed"); - wl_storage + state .write(&get_nonce_key(), Uint::from(42)) .expect("Test failed"); + state.commit_block().unwrap(); TestPackage { validators: [validator_a, validator_b, validator_c], keys, - wl_storage, + state, } } @@ -342,10 +330,10 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = 
setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validators[0]].eth_bridge; let vext = bridge_pool_roots::Vext { @@ -356,8 +344,7 @@ mod test_apply_bp_roots_to_storage { } .sign(&keys[&validators[0]].protocol); let TxResult { changed_keys, .. } = - apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let bp_root_key = vote_tallies::Keys::from(( &BridgePoolRoot(BridgePoolRootProof::new((root, nonce))), 100.into(), @@ -374,8 +361,7 @@ mod test_apply_bp_roots_to_storage { .sign(&keys[&validators[2]].protocol); let TxResult { changed_keys, .. } = - apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let expected: BTreeSet = [bp_root_key.seen_by(), bp_root_key.voting_power()] @@ -392,10 +378,10 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validators[0]].eth_bridge; let mut vexts: MultiSignedVext = bridge_pool_roots::Vext { @@ -415,7 +401,7 @@ mod test_apply_bp_roots_to_storage { .sign(&keys[&validators[1]].protocol); vexts.insert(vext); let TxResult { changed_keys, .. 
} = - apply_derived_tx(&mut wl_storage, vexts).expect("Test failed"); + apply_derived_tx(&mut state, vexts).expect("Test failed"); let bp_root_key = vote_tallies::Keys::from(( &BridgePoolRoot(BridgePoolRootProof::new((root, nonce))), 100.into(), @@ -434,10 +420,10 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validators[0]].eth_bridge; let vext = bridge_pool_roots::Vext { @@ -447,8 +433,7 @@ mod test_apply_bp_roots_to_storage { .sig, } .sign(&keys[&validators[0]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let hot_key = &keys[&validators[1]].eth_bridge; let vext = bridge_pool_roots::Vext { @@ -458,8 +443,7 @@ mod test_apply_bp_roots_to_storage { } .sign(&keys[&validators[1]].protocol); let TxResult { changed_keys, .. 
} = - apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let bp_root_key = vote_tallies::Keys::from(( &BridgePoolRoot(BridgePoolRootProof::new((root, nonce))), 100.into(), @@ -481,10 +465,10 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let bp_root_key = vote_tallies::Keys::from(( &BridgePoolRoot(BridgePoolRootProof::new((root, nonce))), @@ -499,13 +483,12 @@ mod test_apply_bp_roots_to_storage { .sig, } .sign(&keys[&validators[0]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); - let voting_power = wl_storage + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); + let voting_power = state .read::(&bp_root_key.voting_power()) .expect("Test failed") .expect("Test failed") - .fractional_stake(&wl_storage); + .fractional_stake(&state); assert_eq!( voting_power, FractionalVotingPower::new_u64(5, 12).unwrap() @@ -518,13 +501,12 @@ mod test_apply_bp_roots_to_storage { sig: Signed::<_, SignableEthMessage>::new(hot_key, to_sign).sig, } .sign(&keys[&validators[1]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); - let voting_power = wl_storage + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); + let voting_power = state .read::(&bp_root_key.voting_power()) .expect("Test failed") .expect("Test failed") - .fractional_stake(&wl_storage); + .fractional_stake(&state); assert_eq!(voting_power, FractionalVotingPower::new_u64(5, 6).unwrap()); } @@ -534,10 +516,10 @@ mod 
test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validators[0]].eth_bridge; @@ -553,11 +535,10 @@ mod test_apply_bp_roots_to_storage { .sig, } .sign(&keys[&validators[0]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let seen: bool = BorshDeserialize::try_from_slice( - wl_storage + state .read_bytes(&bp_root_key.seen()) .expect("Test failed") .expect("Test failed") @@ -573,11 +554,10 @@ mod test_apply_bp_roots_to_storage { sig: Signed::<_, SignableEthMessage>::new(hot_key, to_sign).sig, } .sign(&keys[&validators[1]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let seen: bool = BorshDeserialize::try_from_slice( - wl_storage + state .read_bytes(&bp_root_key.seen()) .expect("Test failed") .expect("Test failed") @@ -593,10 +573,10 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validators[0]].eth_bridge; @@ -612,12 +592,11 @@ mod test_apply_bp_roots_to_storage { .sig, } .sign(&keys[&validators[0]].protocol); - _ = 
apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let expected = Votes::from([(validators[0].clone(), 100.into())]); let seen_by: Votes = BorshDeserialize::try_from_slice( - wl_storage + state .read_bytes(&bp_root_key.seen_by()) .expect("Test failed") .expect("Test failed") @@ -633,15 +612,14 @@ mod test_apply_bp_roots_to_storage { sig: Signed::<_, SignableEthMessage>::new(hot_key, to_sign).sig, } .sign(&keys[&validators[1]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let expected = Votes::from([ (validators[0].clone(), 100.into()), (validators[1].clone(), 100.into()), ]); let seen_by: Votes = BorshDeserialize::try_from_slice( - wl_storage + state .read_bytes(&bp_root_key.seen_by()) .expect("Test failed") .expect("Test failed") @@ -657,10 +635,10 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validators[0]].eth_bridge; let mut expected = @@ -673,21 +651,20 @@ mod test_apply_bp_roots_to_storage { sig: Signed::<_, SignableEthMessage>::new(hot_key, to_sign).sig, }; expected.0.attach_signature( - wl_storage + state .ethbridge_queries() .get_eth_addr_book( &validators[0], - wl_storage.pos_queries().get_epoch(100.into()), + state.pos_queries().get_epoch(100.into()), ) .expect("Test failed"), vext.sig.clone(), ); let vext = vext.sign(&keys[&validators[0]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + _ = 
apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let proof: BridgePoolRootProof = BorshDeserialize::try_from_slice( - wl_storage + state .read_bytes(&bp_root_key.body()) .expect("Test failed") .expect("Test failed") @@ -705,14 +682,14 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); assert!( - wl_storage + state .read_bytes(&get_signed_root_key()) .expect("Test failed") .is_none() @@ -737,12 +714,12 @@ mod test_apply_bp_roots_to_storage { .sign(&keys[&validators[1]].protocol); vexts.insert(vext); - let epoch = wl_storage.pos_queries().get_epoch(100.into()); + let epoch = state.pos_queries().get_epoch(100.into()); let sigs: Vec<_> = vexts .iter() .map(|s| { ( - wl_storage + state .ethbridge_queries() .get_eth_addr_book(&s.data.validator_addr, epoch) .expect("Test failed"), @@ -751,10 +728,10 @@ mod test_apply_bp_roots_to_storage { }) .collect(); - _ = apply_derived_tx(&mut wl_storage, vexts).expect("Test failed"); + _ = apply_derived_tx(&mut state, vexts).expect("Test failed"); let (proof, _): (BridgePoolRootProof, BlockHeight) = BorshDeserialize::try_from_slice( - wl_storage + state .read_bytes(&get_signed_root_key()) .expect("Test failed") .expect("Test failed") @@ -783,7 +760,7 @@ mod test_apply_bp_roots_to_storage { let validator_3_stake = Amount::native_whole(100); // start epoch 0 with validator 1 - let (mut wl_storage, keys) = test_utils::setup_storage_with_validators( + let (mut state, keys) = test_utils::setup_storage_with_validators( HashMap::from([(validator_1.clone(), validator_1_stake)]), ); @@ -792,11 +769,11 @@ mod 
test_apply_bp_roots_to_storage { pipeline_len: 1, ..Default::default() }; - write_pos_params(&mut wl_storage, ¶ms).expect("Test failed"); + write_pos_params(&mut state, ¶ms).expect("Test failed"); // insert validators 2 and 3 at epoch 1 test_utils::append_validators_to_storage( - &mut wl_storage, + &mut state, HashMap::from([ (validator_2.clone(), validator_2_stake), (validator_3.clone(), validator_3_stake), @@ -807,7 +784,7 @@ mod test_apply_bp_roots_to_storage { macro_rules! query_validators { () => { |epoch: u64| { - wl_storage + state .pos_queries() .get_consensus_validators(Some(epoch.into())) .iter() @@ -827,9 +804,7 @@ mod test_apply_bp_roots_to_storage { HashMap::from([(validator_1.clone(), validator_1_stake)]) ); assert_eq!( - wl_storage - .pos_queries() - .get_total_voting_power(Some(0.into())), + state.pos_queries().get_total_voting_power(Some(0.into())), validator_1_stake, ); assert_eq!( @@ -841,23 +816,21 @@ mod test_apply_bp_roots_to_storage { ]) ); assert_eq!( - wl_storage - .pos_queries() - .get_total_voting_power(Some(1.into())), + state.pos_queries().get_total_voting_power(Some(1.into())), validator_1_stake + validator_2_stake + validator_3_stake, ); // set up the bridge pool's storage - vp::bridge_pool::init_storage(&mut wl_storage); + vp::bridge_pool::init_storage(&mut state); test_utils::commit_bridge_pool_root_at_height( - &mut wl_storage, + &mut state, &KeccakHash([1; 32]), 3.into(), ); // construct proof - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validator_1].eth_bridge; let vext = bridge_pool_roots::Vext { @@ -867,16 +840,15 @@ mod test_apply_bp_roots_to_storage { } .sign(&keys[&validator_1].protocol); - _ = apply_derived_tx(&mut 
wl_storage, vext.into()) - .expect("Test failed"); + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); // query validator set of the proof // (should be the one from epoch 0) - let (_, root_height) = wl_storage + let (_, root_height) = state .ethbridge_queries() .get_signed_bridge_pool_root() .expect("Test failed"); - let root_epoch = wl_storage + let root_epoch = state .pos_queries() .get_epoch(root_height) .expect("Test failed"); @@ -894,11 +866,11 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); macro_rules! decide_at_height { @@ -914,7 +886,7 @@ mod test_apply_bp_roots_to_storage { .sig, } .sign(&keys[&validators[0]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) + _ = apply_derived_tx(&mut state, vext.into()) .expect("Test failed"); let hot_key = &keys[&validators[1]].eth_bridge; let vext = bridge_pool_roots::Vext { @@ -927,7 +899,7 @@ mod test_apply_bp_roots_to_storage { .sig, } .sign(&keys[&validators[1]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) + _ = apply_derived_tx(&mut state, vext.into()) .expect("Test failed"); }; } @@ -936,7 +908,7 @@ mod test_apply_bp_roots_to_storage { decide_at_height!(100); // check the signed root in storage - let root_in_storage = wl_storage + let root_in_storage = state .read::<(BridgePoolRoot, BlockHeight)>(&get_signed_root_key()) .expect("Test failed - storage read failed") .expect("Test failed - no signed root in storage"); @@ -950,7 +922,7 @@ mod test_apply_bp_roots_to_storage { decide_at_height!(99); // check the signed root in storage is unchanged - let root_in_storage = 
wl_storage + let root_in_storage = state .read::<(BridgePoolRoot, BlockHeight)>(&get_signed_root_key()) .expect("Test failed - storage read failed") .expect("Test failed - no signed root in storage"); diff --git a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/eth_msgs.rs b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/eth_msgs.rs index 515c793a65..3267912131 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/eth_msgs.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/eth_msgs.rs @@ -1,5 +1,5 @@ use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use namada_core::types::ethereum_events::EthereumEvent; +use namada_core::ethereum_events::EthereumEvent; use namada_vote_ext::ethereum_events::MultiSignedEthEvent; use crate::protocol::transactions::votes::{dedupe, Tally, Votes}; @@ -51,11 +51,11 @@ pub struct EthMsg { mod tests { use std::collections::BTreeSet; - use namada_core::types::address; - use namada_core::types::ethereum_events::testing::{ + use namada_core::address; + use namada_core::ethereum_events::testing::{ arbitrary_nonce, arbitrary_single_transfer, }; - use namada_core::types::storage::BlockHeight; + use namada_core::storage::BlockHeight; use super::*; diff --git a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs index 1cd2144eec..818ec66f10 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs @@ -5,22 +5,21 @@ use std::str::FromStr; use borsh::BorshDeserialize; use eyre::{Result, WrapErr}; -use namada_core::hints; -use namada_core::ledger::eth_bridge::ADDRESS as BRIDGE_ADDRESS; -use namada_core::types::address::Address; -use namada_core::types::eth_abi::Encode; -use namada_core::types::eth_bridge_pool::{ +use namada_core::address::Address; +use 
namada_core::eth_abi::Encode; +use namada_core::eth_bridge_pool::{ erc20_nut_address, erc20_token_address, PendingTransfer, TransferToEthereumKind, }; -use namada_core::types::ethereum_events::{ +use namada_core::ethereum_events::{ EthAddress, EthereumEvent, TransferToEthereum, TransferToNamada, TransfersToNamada, }; -use namada_core::types::ethereum_structs::EthBridgeEvent; -use namada_core::types::storage::{BlockHeight, Key, KeySeg}; +use namada_core::ethereum_structs::EthBridgeEvent; +use namada_core::hints; +use namada_core::storage::{BlockHeight, Key, KeySeg}; use namada_parameters::read_epoch_duration_parameter; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_storage::{StorageRead, StorageWrite}; use namada_trans_token::storage_key::{balance_key, minted_balance_key}; @@ -31,13 +30,13 @@ use crate::storage::bridge_pool::{ use crate::storage::eth_bridge_queries::{EthAssetMint, EthBridgeQueries}; use crate::storage::parameters::read_native_erc20_address; use crate::storage::{self as bridge_storage}; -use crate::token; +use crate::{token, ADDRESS as BRIDGE_ADDRESS}; /// Updates storage based on the given confirmed `event`. For example, for a /// confirmed [`EthereumEvent::TransfersToNamada`], mint the corresponding /// transferred assets to the appropriate receiver addresses. pub(super) fn act_on( - wl_storage: &mut WlStorage, + state: &mut WlState, event: EthereumEvent, ) -> Result<(BTreeSet, BTreeSet)> where @@ -47,7 +46,7 @@ where match event { EthereumEvent::TransfersToNamada { transfers, nonce } => { act_on_transfers_to_namada( - wl_storage, + state, TransfersToNamada { transfers, nonce }, ) } @@ -55,7 +54,7 @@ where ref transfers, ref relayer, .. 
- } => act_on_transfers_to_eth(wl_storage, transfers, relayer), + } => act_on_transfers_to_eth(state, transfers, relayer), _ => { tracing::debug!(?event, "No actions taken for Ethereum event"); Ok(Default::default()) @@ -64,7 +63,7 @@ where } fn act_on_transfers_to_namada<'tx, D, H>( - wl_storage: &mut WlStorage, + state: &mut WlState, transfer_event: TransfersToNamada, ) -> Result<(BTreeSet, BTreeSet)> where @@ -75,15 +74,15 @@ where let mut changed_keys = BTreeSet::new(); // we need to collect the events into a separate // buffer because of rust's borrowing rules :| - let confirmed_events: Vec<_> = wl_storage - .storage + let confirmed_events: Vec<_> = state + .in_mem_mut() .eth_events_queue .transfers_to_namada .push_and_iter(transfer_event) .collect(); for TransfersToNamada { transfers, .. } in confirmed_events { update_transfers_to_namada_state( - wl_storage, + state, &mut changed_keys, transfers.iter(), )?; @@ -96,7 +95,7 @@ where } fn update_transfers_to_namada_state<'tx, D, H>( - wl_storage: &mut WlStorage, + state: &mut WlState, changed_keys: &mut BTreeSet, transfers: impl IntoIterator, ) -> Result<()> @@ -104,7 +103,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let wrapped_native_erc20 = read_native_erc20_address(wl_storage)?; + let wrapped_native_erc20 = read_native_erc20_address(state)?; for transfer in transfers { tracing::debug!( ?transfer, @@ -117,7 +116,7 @@ where } = transfer; let mut changed = if asset != &wrapped_native_erc20 { let (asset_count, changed) = - mint_eth_assets(wl_storage, asset, receiver, amount)?; + mint_eth_assets(state, asset, receiver, amount)?; // TODO: query denomination of the whitelisted token from storage, // and print this amount with the proper formatting; for now, use // NAM's formatting @@ -137,12 +136,7 @@ where } changed } else { - redeem_native_token( - wl_storage, - &wrapped_native_erc20, - receiver, - amount, - )? 
+ redeem_native_token(state, &wrapped_native_erc20, receiver, amount)? }; changed_keys.append(&mut changed) } @@ -151,7 +145,7 @@ where /// Redeems `amount` of the native token for `receiver` from escrow. fn redeem_native_token( - wl_storage: &mut WlStorage, + state: &mut WlState, native_erc20: &EthAddress, receiver: &Address, amount: &token::Amount, @@ -161,49 +155,41 @@ where H: 'static + StorageHasher + Sync, { let eth_bridge_native_token_balance_key = - balance_key(&wl_storage.storage.native_token, &BRIDGE_ADDRESS); + balance_key(&state.in_mem().native_token, &BRIDGE_ADDRESS); let receiver_native_token_balance_key = - balance_key(&wl_storage.storage.native_token, receiver); + balance_key(&state.in_mem().native_token, receiver); let native_werc20_supply_key = minted_balance_key(&erc20_token_address(native_erc20)); - update::amount( - wl_storage, - ð_bridge_native_token_balance_key, - |balance| { - tracing::debug!( - %eth_bridge_native_token_balance_key, - ?balance, - "Existing value found", - ); - balance.spend(amount)?; - tracing::debug!( - %eth_bridge_native_token_balance_key, - ?balance, - "New value calculated", - ); - Ok(()) - }, - )?; - update::amount( - wl_storage, - &receiver_native_token_balance_key, - |balance| { - tracing::debug!( - %receiver_native_token_balance_key, - ?balance, - "Existing value found", - ); - balance.receive(amount)?; - tracing::debug!( - %receiver_native_token_balance_key, - ?balance, - "New value calculated", - ); - Ok(()) - }, - )?; - update::amount(wl_storage, &native_werc20_supply_key, |balance| { + update::amount(state, ð_bridge_native_token_balance_key, |balance| { + tracing::debug!( + %eth_bridge_native_token_balance_key, + ?balance, + "Existing value found", + ); + balance.spend(amount)?; + tracing::debug!( + %eth_bridge_native_token_balance_key, + ?balance, + "New value calculated", + ); + Ok(()) + })?; + update::amount(state, &receiver_native_token_balance_key, |balance| { + tracing::debug!( + 
%receiver_native_token_balance_key, + ?balance, + "Existing value found", + ); + balance.receive(amount)?; + tracing::debug!( + %receiver_native_token_balance_key, + ?balance, + "New value calculated", + ); + Ok(()) + })?; + update::amount(state, &native_werc20_supply_key, |balance| { tracing::debug!( %native_werc20_supply_key, ?balance, @@ -237,7 +223,7 @@ where /// If the given asset is not whitelisted or has exceeded the /// token caps, mint NUTs, too. fn mint_eth_assets( - wl_storage: &mut WlStorage, + state: &mut WlState, asset: &EthAddress, receiver: &Address, &amount: &token::Amount, @@ -248,7 +234,7 @@ where { let mut changed_keys = BTreeSet::default(); - let asset_count = wl_storage + let asset_count = state .ethbridge_queries() .get_eth_assets_to_mint(asset, amount); @@ -269,7 +255,7 @@ where for (token, ref amount) in assets_to_mint { let balance_key = balance_key(&token, receiver); - update::amount(wl_storage, &balance_key, |balance| { + update::amount(state, &balance_key, |balance| { tracing::debug!( %balance_key, ?balance, @@ -286,7 +272,7 @@ where _ = changed_keys.insert(balance_key); let supply_key = minted_balance_key(&token); - update::amount(wl_storage, &supply_key, |supply| { + update::amount(state, &supply_key, |supply| { tracing::debug!( %supply_key, ?supply, @@ -307,7 +293,7 @@ where } fn act_on_transfers_to_eth( - wl_storage: &mut WlStorage, + state: &mut WlState, transfers: &[TransferToEthereum], relayer: &Address, ) -> Result<(BTreeSet, BTreeSet)> @@ -324,12 +310,12 @@ where // halts the Ethereum bridge, since nonces will fall out // of sync between Namada and Ethereum let nonce_key = get_nonce_key(); - increment_bp_nonce(&nonce_key, wl_storage)?; + increment_bp_nonce(&nonce_key, state)?; changed_keys.insert(nonce_key); // all keys of pending transfers let prefix = BRIDGE_POOL_ADDRESS.to_db_key().into(); - let mut pending_keys: HashSet = wl_storage + let mut pending_keys: HashSet = state .iter_prefix(&prefix) .context("Failed to iterate 
over storage")? .map(|(k, _, _)| { @@ -340,7 +326,7 @@ where // Remove the completed transfers from the bridge pool for event in transfers { let (pending_transfer, key) = if let Some((pending, key)) = - wl_storage.ethbridge_queries().lookup_transfer_to_eth(event) + state.ethbridge_queries().lookup_transfer_to_eth(event) { (pending, key) } else { @@ -353,7 +339,7 @@ where and burning any Ethereum assets in Namada" ); changed_keys.append(&mut update_transferred_asset_balances( - wl_storage, + state, &pending_transfer, )?); let pool_balance_key = @@ -361,14 +347,14 @@ where let relayer_rewards_key = balance_key(&pending_transfer.gas_fee.token, relayer); // give the relayer the gas fee for this transfer. - update::amount(wl_storage, &relayer_rewards_key, |balance| { + update::amount(state, &relayer_rewards_key, |balance| { balance.receive(&pending_transfer.gas_fee.amount) })?; // the gas fee is removed from escrow. - update::amount(wl_storage, &pool_balance_key, |balance| { + update::amount(state, &pool_balance_key, |balance| { balance.spend(&pending_transfer.gas_fee.amount) })?; - wl_storage.delete(&key)?; + state.delete(&key)?; _ = pending_keys.remove(&key); _ = changed_keys.insert(key); _ = changed_keys.insert(pool_balance_key); @@ -383,21 +369,21 @@ where } // TODO the timeout height is min_num_blocks of an epoch for now - let epoch_duration = read_epoch_duration_parameter(wl_storage)?; + let epoch_duration = read_epoch_duration_parameter(state)?; let timeout_offset = epoch_duration.min_num_of_blocks; // Check time out and refund - if wl_storage.storage.block.height.0 > timeout_offset { + if state.in_mem().block.height.0 > timeout_offset { let timeout_height = - BlockHeight(wl_storage.storage.block.height.0 - timeout_offset); + BlockHeight(state.in_mem().block.height.0 - timeout_offset); for key in pending_keys { let inserted_height = BlockHeight::try_from_slice( - &wl_storage.storage.block.tree.get(&key)?, + &state.in_mem().block.tree.get(&key)?, ) 
.expect("BlockHeight should be decoded"); if inserted_height <= timeout_height { let (mut keys, mut new_tx_events) = - refund_transfer(wl_storage, key)?; + refund_transfer(state, key)?; changed_keys.append(&mut keys); tx_events.append(&mut new_tx_events); } @@ -409,23 +395,23 @@ where fn increment_bp_nonce( nonce_key: &Key, - wl_storage: &mut WlStorage, + state: &mut WlState, ) -> Result<()> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let next_nonce = wl_storage + let next_nonce = state .ethbridge_queries() .get_bridge_pool_nonce() .checked_increment() .expect("Bridge pool nonce has overflowed"); - wl_storage.write(nonce_key, next_nonce)?; + state.write(nonce_key, next_nonce)?; Ok(()) } fn refund_transfer( - wl_storage: &mut WlStorage, + state: &mut WlState, key: Key, ) -> Result<(BTreeSet, BTreeSet)> where @@ -435,15 +421,15 @@ where let mut changed_keys = BTreeSet::default(); let mut tx_events = BTreeSet::default(); - let transfer = match wl_storage.read_bytes(&key)? { + let transfer = match state.read_bytes(&key)? 
{ Some(v) => PendingTransfer::try_from_slice(&v[..])?, None => unreachable!(), }; - changed_keys.append(&mut refund_transfer_fees(wl_storage, &transfer)?); - changed_keys.append(&mut refund_transferred_assets(wl_storage, &transfer)?); + changed_keys.append(&mut refund_transfer_fees(state, &transfer)?); + changed_keys.append(&mut refund_transferred_assets(state, &transfer)?); // Delete the key from the bridge pool - wl_storage.delete(&key)?; + state.delete(&key)?; _ = changed_keys.insert(key); // Emit expiration event @@ -455,7 +441,7 @@ where } fn refund_transfer_fees( - wl_storage: &mut WlStorage, + state: &mut WlState, transfer: &PendingTransfer, ) -> Result> where @@ -468,10 +454,10 @@ where balance_key(&transfer.gas_fee.token, &transfer.gas_fee.payer); let pool_balance_key = balance_key(&transfer.gas_fee.token, &BRIDGE_POOL_ADDRESS); - update::amount(wl_storage, &payer_balance_key, |balance| { + update::amount(state, &payer_balance_key, |balance| { balance.receive(&transfer.gas_fee.amount) })?; - update::amount(wl_storage, &pool_balance_key, |balance| { + update::amount(state, &pool_balance_key, |balance| { balance.spend(&transfer.gas_fee.amount) })?; @@ -482,7 +468,7 @@ where } fn refund_transferred_assets( - wl_storage: &mut WlStorage, + state: &mut WlState, transfer: &PendingTransfer, ) -> Result> where @@ -491,7 +477,7 @@ where { let mut changed_keys = BTreeSet::default(); - let native_erc20_addr = match wl_storage + let native_erc20_addr = match state .read_bytes(&bridge_storage::native_erc20_key())? 
{ Some(v) => EthAddress::try_from_slice(&v[..])?, @@ -501,9 +487,9 @@ where }; let (source, target) = if transfer.transfer.asset == native_erc20_addr { let escrow_balance_key = - balance_key(&wl_storage.storage.native_token, &BRIDGE_ADDRESS); + balance_key(&state.in_mem().native_token, &BRIDGE_ADDRESS); let sender_balance_key = balance_key( - &wl_storage.storage.native_token, + &state.in_mem().native_token, &transfer.transfer.sender, ); (escrow_balance_key, sender_balance_key) @@ -513,10 +499,10 @@ where let sender_balance_key = balance_key(&token, &transfer.transfer.sender); (escrow_balance_key, sender_balance_key) }; - update::amount(wl_storage, &source, |balance| { + update::amount(state, &source, |balance| { balance.spend(&transfer.transfer.amount) })?; - update::amount(wl_storage, &target, |balance| { + update::amount(state, &target, |balance| { balance.receive(&transfer.transfer.amount) })?; @@ -529,7 +515,7 @@ where /// Burns any transferred ERC20s other than wNAM. If NAM is transferred, /// update the wNAM supply key. 
fn update_transferred_asset_balances( - wl_storage: &mut WlStorage, + state: &mut WlState, transfer: &PendingTransfer, ) -> Result> where @@ -538,7 +524,7 @@ where { let mut changed_keys = BTreeSet::default(); - let maybe_addr = wl_storage.read(&bridge_storage::native_erc20_key())?; + let maybe_addr = state.read(&bridge_storage::native_erc20_key())?; let Some(native_erc20_addr) = maybe_addr else { return Err(eyre::eyre!("Could not read wNam key from storage")); }; @@ -554,7 +540,7 @@ where unreachable!("Attempted to mint wNAM NUTs!"); } let supply_key = minted_balance_key(&token); - update::amount(wl_storage, &supply_key, |supply| { + update::amount(state, &supply_key, |supply| { supply.receive(&transfer.transfer.amount) })?; _ = changed_keys.insert(supply_key); @@ -565,13 +551,13 @@ where // other asset kinds must be burned let escrow_balance_key = balance_key(&token, &BRIDGE_POOL_ADDRESS); - update::amount(wl_storage, &escrow_balance_key, |balance| { + update::amount(state, &escrow_balance_key, |balance| { balance.spend(&transfer.transfer.amount) })?; _ = changed_keys.insert(escrow_balance_key); let supply_key = minted_balance_key(&token); - update::amount(wl_storage, &supply_key, |supply| { + update::amount(state, &supply_key, |supply| { supply.spend(&transfer.transfer.amount) })?; _ = changed_keys.insert(supply_key); @@ -585,37 +571,33 @@ mod tests { use std::collections::HashMap; use assert_matches::assert_matches; - use eyre::Result; - use namada_core::borsh::BorshSerializeExt; - use namada_core::types::address::testing::gen_implicit_address; - use namada_core::types::address::{gen_established_address, nam, wnam}; - use namada_core::types::eth_bridge_pool::GasFee; - use namada_core::types::ethereum_events::testing::{ + use namada_core::address::gen_established_address; + use namada_core::address::testing::{gen_implicit_address, nam, wnam}; + use namada_core::eth_bridge_pool::GasFee; + use namada_core::ethereum_events::testing::{ arbitrary_keccak_hash, 
arbitrary_nonce, DAI_ERC20_ETH_ADDRESS, }; - use namada_core::types::time::DurationSecs; - use namada_core::types::token::Amount; - use namada_core::types::{address, eth_bridge_pool}; + use namada_core::time::DurationSecs; + use namada_core::token::Amount; + use namada_core::{address, eth_bridge_pool}; use namada_parameters::{update_epoch_parameter, EpochDuration}; - use namada_state::testing::TestWlStorage; - use namada_storage::mockdb::MockDBWriteBatch; + use namada_state::testing::TestState; use super::*; use crate::storage::bridge_pool::get_pending_key; use crate::storage::wrapped_erc20s; use crate::test_utils::{self, stored_keys_count}; - fn init_storage(wl_storage: &mut TestWlStorage) { + fn init_storage(state: &mut TestState) { // set the timeout height offset let timeout_offset = 10; let epoch_duration = EpochDuration { min_num_of_blocks: timeout_offset, min_duration: DurationSecs(5), }; - update_epoch_parameter(wl_storage, &epoch_duration) - .expect("Test failed"); + update_epoch_parameter(state, &epoch_duration).expect("Test failed"); // set native ERC20 token - wl_storage + state .write(&bridge_storage::native_erc20_key(), wnam()) .expect("Test failed"); } @@ -690,7 +672,7 @@ mod tests { } fn init_bridge_pool_transfers( - wl_storage: &mut TestWlStorage, + state: &mut TestState, assets_transferred: A, ) -> Vec where @@ -719,10 +701,7 @@ mod tests { }, }; let key = get_pending_key(&transfer); - wl_storage - .storage - .write(&key, transfer.serialize_to_vec()) - .expect("Test failed"); + state.write(&key, &transfer).expect("Test failed"); pending_transfers.push(transfer); } @@ -730,11 +709,9 @@ mod tests { } #[inline] - fn init_bridge_pool( - wl_storage: &mut TestWlStorage, - ) -> Vec { + fn init_bridge_pool(state: &mut TestState) -> Vec { init_bridge_pool_transfers( - wl_storage, + state, (0..2) .map(|i| { ( @@ -753,7 +730,7 @@ mod tests { } fn init_balance( - wl_storage: &mut TestWlStorage, + state: &mut TestState, pending_transfers: &Vec, ) { for 
transfer in pending_transfers { @@ -761,12 +738,10 @@ mod tests { let payer = address::testing::established_address_2(); let payer_key = balance_key(&transfer.gas_fee.token, &payer); let payer_balance = Amount::from(0); - wl_storage - .write(&payer_key, payer_balance) - .expect("Test failed"); + state.write(&payer_key, payer_balance).expect("Test failed"); let escrow_key = balance_key(&transfer.gas_fee.token, &BRIDGE_POOL_ADDRESS); - update::amount(wl_storage, &escrow_key, |balance| { + update::amount(state, &escrow_key, |balance| { let gas_fee = Amount::from_u64(1); balance.receive(&gas_fee) }) @@ -776,43 +751,41 @@ mod tests { // native ERC20 let sender_key = balance_key(&nam(), &transfer.transfer.sender); let sender_balance = Amount::from(0); - wl_storage + state .write(&sender_key, sender_balance) .expect("Test failed"); let escrow_key = balance_key(&nam(), &BRIDGE_ADDRESS); let escrow_balance = Amount::from(10); - wl_storage + state .write(&escrow_key, escrow_balance) .expect("Test failed"); } else { let token = transfer.token_address(); let sender_key = balance_key(&token, &transfer.transfer.sender); let sender_balance = Amount::from(0); - wl_storage + state .write(&sender_key, sender_balance) .expect("Test failed"); let escrow_key = balance_key(&token, &BRIDGE_POOL_ADDRESS); let escrow_balance = Amount::from(10); - wl_storage + state .write(&escrow_key, escrow_balance) .expect("Test failed"); - update::amount( - wl_storage, - &minted_balance_key(&token), - |supply| supply.receive(&transfer.transfer.amount), - ) + update::amount(state, &minted_balance_key(&token), |supply| { + supply.receive(&transfer.transfer.amount) + }) .expect("Test failed"); }; } } #[test] - /// Test that we do not make any changes to wl_storage when acting on most + /// Test that we do not make any changes to state when acting on most /// events fn test_act_on_does_nothing_for_other_events() { - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut 
wl_storage); - let initial_stored_keys_count = stored_keys_count(&wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); + let initial_stored_keys_count = stored_keys_count(&state); let events = vec![EthereumEvent::ValidatorSetUpdate { nonce: arbitrary_nonce(), bridge_validator_hash: arbitrary_keccak_hash(), @@ -820,9 +793,9 @@ mod tests { }]; for event in events { - act_on(&mut wl_storage, event.clone()).unwrap(); + act_on(&mut state, event.clone()).unwrap(); assert_eq!( - stored_keys_count(&wl_storage), + stored_keys_count(&state), initial_stored_keys_count, "storage changed unexpectedly while acting on event: {:#?}", event @@ -831,13 +804,13 @@ mod tests { } #[test] - /// Test that wl_storage is indeed changed when we act on a non-empty + /// Test that state is indeed changed when we act on a non-empty /// TransfersToNamada batch fn test_act_on_changes_storage_for_transfers_to_namada() { - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); - wl_storage.commit_block().expect("Test failed"); - let initial_stored_keys_count = stored_keys_count(&wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); + state.commit_block().expect("Test failed"); + let initial_stored_keys_count = stored_keys_count(&state); let amount = Amount::from(100); let receiver = address::testing::established_address_1(); let transfers = vec![TransferToNamada { @@ -850,12 +823,9 @@ mod tests { transfers, }; - act_on(&mut wl_storage, event).unwrap(); + act_on(&mut state, event).unwrap(); - assert_eq!( - stored_keys_count(&wl_storage), - initial_stored_keys_count + 2 - ); + assert_eq!(stored_keys_count(&state), initial_stored_keys_count + 2); } /// Parameters to test minting DAI in Namada. 
@@ -882,11 +852,11 @@ mod tests { }; assert_eq!(self.transferred_amount, nut_amount + erc20_amount); - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); if !dai_token_cap.is_zero() { test_utils::whitelist_tokens( - &mut wl_storage, + &mut state, [( DAI_ERC20_ETH_ADDRESS, test_utils::WhitelistMeta { @@ -905,7 +875,7 @@ mod tests { }]; update_transfers_to_namada_state( - &mut wl_storage, + &mut state, &mut BTreeSet::new(), &transfers, ) @@ -923,9 +893,9 @@ mod tests { let receiver_balance_key = balance_key(&wdai, &receiver); let wdai_supply_key = minted_balance_key(&wdai); - for key in vec![receiver_balance_key, wdai_supply_key] { + for key in [receiver_balance_key, wdai_supply_key] { let value: Option = - wl_storage.read(&key).unwrap(); + state.read(&key).unwrap(); if expected_amount.is_zero() { assert_matches!(value, None); } else { @@ -973,12 +943,12 @@ mod tests { /// that pending transfers are deleted from the Bridge pool, the /// Bridge pool nonce is updated and escrowed assets are burned. 
fn test_act_on_changes_storage_for_transfers_to_eth() { - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); - wl_storage.commit_block().expect("Test failed"); - init_storage(&mut wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); + state.commit_block().expect("Test failed"); + init_storage(&mut state); let native_erc20 = - read_native_erc20_address(&wl_storage).expect("Test failed"); + read_native_erc20_address(&state).expect("Test failed"); let random_erc20 = EthAddress([0xff; 20]); let random_erc20_token = wrapped_erc20s::nut(&random_erc20); let random_erc20_2 = EthAddress([0xee; 20]); @@ -992,7 +962,7 @@ mod tests { 19, ]); let pending_transfers = init_bridge_pool_transfers( - &mut wl_storage, + &mut state, [ (native_erc20, TransferData::default()), (random_erc20, TransferDataBuilder::new().kind_nut().build()), @@ -1016,7 +986,7 @@ mod tests { ), ], ); - init_balance(&mut wl_storage, &pending_transfers); + init_balance(&mut state, &pending_transfers); let pending_keys: HashSet = pending_transfers.iter().map(get_pending_key).collect(); let relayer = gen_established_address("random"); @@ -1038,20 +1008,20 @@ mod tests { &BRIDGE_POOL_ADDRESS, ); let mut bp_nam_balance_pre = Amount::try_from_slice( - &wl_storage + &state .read_bytes(&pool_nam_balance_key) .expect("Test failed") .expect("Test failed"), ) .expect("Test failed"); let mut bp_erc_balance_pre = Amount::try_from_slice( - &wl_storage + &state .read_bytes(&pool_erc_balance_key) .expect("Test failed") .expect("Test failed"), ) .expect("Test failed"); - let (mut changed_keys, _) = act_on(&mut wl_storage, event).unwrap(); + let (mut changed_keys, _) = act_on(&mut state, event).unwrap(); for erc20 in [ random_erc20_token, @@ -1081,15 +1051,12 @@ mod tests { let prefix = BRIDGE_POOL_ADDRESS.to_db_key().into(); assert_eq!( - wl_storage - .iter_prefix(&prefix) - .expect("Test failed") - .count(), + 
state.iter_prefix(&prefix).expect("Test failed").count(), // NOTE: we should have one write -- the bridge pool nonce update 1 ); let relayer_nam_balance = Amount::try_from_slice( - &wl_storage + &state .read_bytes(&payer_nam_balance_key) .expect("Test failed: read error") .expect("Test failed: no value in storage"), @@ -1097,7 +1064,7 @@ mod tests { .expect("Test failed"); assert_eq!(relayer_nam_balance, Amount::from(3)); let relayer_erc_balance = Amount::try_from_slice( - &wl_storage + &state .read_bytes(&payer_erc_balance_key) .expect("Test failed: read error") .expect("Test failed: no value in storage"), @@ -1106,14 +1073,14 @@ mod tests { assert_eq!(relayer_erc_balance, Amount::from(2)); let bp_nam_balance_post = Amount::try_from_slice( - &wl_storage + &state .read_bytes(&pool_nam_balance_key) .expect("Test failed: read error") .expect("Test failed: no value in storage"), ) .expect("Test failed"); let bp_erc_balance_post = Amount::try_from_slice( - &wl_storage + &state .read_bytes(&pool_erc_balance_key) .expect("Test failed: read error") .expect("Test failed: no value in storage"), @@ -1133,19 +1100,16 @@ mod tests { /// Test that the transfers time out in the bridge pool then the refund when /// we act on a TransfersToEthereum fn test_act_on_timeout_for_transfers_to_eth() { - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); - wl_storage.commit_block().expect("Test failed"); - init_storage(&mut wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); + state.commit_block().expect("Test failed"); + init_storage(&mut state); // Height 0 - let pending_transfers = init_bridge_pool(&mut wl_storage); - init_balance(&mut wl_storage, &pending_transfers); - wl_storage - .storage - .commit_block(MockDBWriteBatch) - .expect("Test failed"); + let pending_transfers = init_bridge_pool(&mut state); + init_balance(&mut state, &pending_transfers); + 
state.commit_block().expect("Test failed"); // pending transfers time out - wl_storage.storage.block.height += 10 + 1; + state.in_mem_mut().block.height += 10 + 1; // new pending transfer let transfer = PendingTransfer { transfer: eth_bridge_pool::TransferToEthereum { @@ -1162,15 +1126,9 @@ mod tests { }, }; let key = get_pending_key(&transfer); - wl_storage - .storage - .write(&key, transfer.serialize_to_vec()) - .expect("Test failed"); - wl_storage - .storage - .commit_block(MockDBWriteBatch) - .expect("Test failed"); - wl_storage.storage.block.height += 1; + state.write(&key, transfer).expect("Test failed"); + state.commit_block().expect("Test failed"); + state.in_mem_mut().block.height += 1; // This should only refund let event = EthereumEvent::TransfersToEthereum { @@ -1178,15 +1136,12 @@ mod tests { transfers: vec![], relayer: gen_implicit_address(), }; - let _ = act_on(&mut wl_storage, event).unwrap(); + let _ = act_on(&mut state, event).unwrap(); // The latest transfer is still pending let prefix = BRIDGE_POOL_ADDRESS.to_db_key().into(); assert_eq!( - wl_storage - .iter_prefix(&prefix) - .expect("Test failed") - .count(), + state.iter_prefix(&prefix).expect("Test failed").count(), // NOTE: we should have two writes -- one of them being // the bridge pool nonce update 2 @@ -1198,13 +1153,13 @@ mod tests { .fold(Amount::from(0), |acc, t| acc + t.gas_fee.amount); let payer = address::testing::established_address_2(); let payer_key = balance_key(&nam(), &payer); - let value = wl_storage.read_bytes(&payer_key).expect("Test failed"); + let value = state.read_bytes(&payer_key).expect("Test failed"); let payer_balance = Amount::try_from_slice(&value.expect("Test failed")) .expect("Test failed"); assert_eq!(payer_balance, expected); let pool_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); - let value = wl_storage.read_bytes(&pool_key).expect("Test failed"); + let value = state.read_bytes(&pool_key).expect("Test failed"); let pool_balance = 
Amount::try_from_slice(&value.expect("Test failed")) .expect("Test failed"); assert_eq!(pool_balance, Amount::from(0)); @@ -1213,15 +1168,13 @@ mod tests { for transfer in pending_transfers { if transfer.transfer.asset == wnam() { let sender_key = balance_key(&nam(), &transfer.transfer.sender); - let value = - wl_storage.read_bytes(&sender_key).expect("Test failed"); + let value = state.read_bytes(&sender_key).expect("Test failed"); let sender_balance = Amount::try_from_slice(&value.expect("Test failed")) .expect("Test failed"); assert_eq!(sender_balance, transfer.transfer.amount); let escrow_key = balance_key(&nam(), &BRIDGE_ADDRESS); - let value = - wl_storage.read_bytes(&escrow_key).expect("Test failed"); + let value = state.read_bytes(&escrow_key).expect("Test failed"); let escrow_balance = Amount::try_from_slice(&value.expect("Test failed")) .expect("Test failed"); @@ -1229,15 +1182,13 @@ mod tests { } else { let token = transfer.token_address(); let sender_key = balance_key(&token, &transfer.transfer.sender); - let value = - wl_storage.read_bytes(&sender_key).expect("Test failed"); + let value = state.read_bytes(&sender_key).expect("Test failed"); let sender_balance = Amount::try_from_slice(&value.expect("Test failed")) .expect("Test failed"); assert_eq!(sender_balance, transfer.transfer.amount); let escrow_key = balance_key(&token, &BRIDGE_POOL_ADDRESS); - let value = - wl_storage.read_bytes(&escrow_key).expect("Test failed"); + let value = state.read_bytes(&escrow_key).expect("Test failed"); let escrow_balance = Amount::try_from_slice(&value.expect("Test failed")) .expect("Test failed"); @@ -1248,8 +1199,8 @@ mod tests { #[test] fn test_redeem_native_token() -> Result<()> { - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); let receiver = address::testing::established_address_1(); let amount = 
Amount::from(100); @@ -1259,7 +1210,7 @@ mod tests { &receiver, ); assert!( - wl_storage + state .read_bytes(&receiver_wnam_balance_key) .unwrap() .is_none() @@ -1268,28 +1219,28 @@ mod tests { let bridge_pool_initial_balance = Amount::from(100_000_000); let bridge_pool_native_token_balance_key = token::storage_key::balance_key( - &wl_storage.storage.native_token, + &state.in_mem().native_token, &BRIDGE_ADDRESS, ); let bridge_pool_native_erc20_supply_key = minted_balance_key(&wrapped_erc20s::token(&wnam())); StorageWrite::write( - &mut wl_storage, + &mut state, &bridge_pool_native_token_balance_key, bridge_pool_initial_balance, )?; StorageWrite::write( - &mut wl_storage, + &mut state, &bridge_pool_native_erc20_supply_key, amount, )?; let receiver_native_token_balance_key = token::storage_key::balance_key( - &wl_storage.storage.native_token, + &state.in_mem().native_token, &receiver, ); let changed_keys = - redeem_native_token(&mut wl_storage, &wnam(), &receiver, &amount)?; + redeem_native_token(&mut state, &wnam(), &receiver, &amount)?; assert_eq!( changed_keys, @@ -1300,21 +1251,15 @@ mod tests { ]) ); assert_eq!( - StorageRead::read( - &wl_storage, - &bridge_pool_native_token_balance_key - )?, + StorageRead::read(&state, &bridge_pool_native_token_balance_key)?, Some(bridge_pool_initial_balance - amount) ); assert_eq!( - StorageRead::read(&wl_storage, &receiver_native_token_balance_key)?, + StorageRead::read(&state, &receiver_native_token_balance_key)?, Some(amount) ); assert_eq!( - StorageRead::read( - &wl_storage, - &bridge_pool_native_erc20_supply_key - )?, + StorageRead::read(&state, &bridge_pool_native_erc20_supply_key)?, Some(Amount::zero()) ); @@ -1322,7 +1267,7 @@ mod tests { // // wNAM is never minted, it's converted back to NAM assert!( - wl_storage + state .read_bytes(&receiver_wnam_balance_key) .unwrap() .is_none() @@ -1334,16 +1279,16 @@ mod tests { /// Auxiliary function to test wrapped Ethereum ERC20s functionality. 
fn test_wrapped_erc20s_aux(mut f: F) where - F: FnMut(&mut TestWlStorage, EthereumEvent), + F: FnMut(&mut TestState, EthereumEvent), { - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); - wl_storage.commit_block().expect("Test failed"); - init_storage(&mut wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); + state.commit_block().expect("Test failed"); + init_storage(&mut state); let native_erc20 = - read_native_erc20_address(&wl_storage).expect("Test failed"); + read_native_erc20_address(&state).expect("Test failed"); let pending_transfers = init_bridge_pool_transfers( - &mut wl_storage, + &mut state, [ (native_erc20, TransferData::default()), ( @@ -1372,7 +1317,7 @@ mod tests { ), ], ); - init_balance(&mut wl_storage, &pending_transfers); + init_balance(&mut state, &pending_transfers); let transfers = pending_transfers .into_iter() .map(|ref transfer| { @@ -1386,7 +1331,7 @@ mod tests { transfers, relayer, }; - f(&mut wl_storage, event) + f(&mut state, event) } #[test] @@ -1401,7 +1346,7 @@ mod tests { kind: eth_bridge_pool::TransferToEthereumKind, } - test_wrapped_erc20s_aux(|wl_storage, event| { + test_wrapped_erc20s_aux(|state, event| { let transfers = match &event { EthereumEvent::TransfersToEthereum { transfers, .. } => { transfers.iter() @@ -1409,7 +1354,7 @@ mod tests { _ => panic!("Test failed"), }; let native_erc20 = - read_native_erc20_address(wl_storage).expect("Test failed"); + read_native_erc20_address(state).expect("Test failed"); let deltas = transfers .filter_map( |event @ TransferToEthereum { asset, amount, .. 
}| { @@ -1417,7 +1362,7 @@ mod tests { return None; } let kind = { - let (pending, _) = wl_storage + let (pending, _) = state .ethbridge_queries() .lookup_transfer_to_eth(event) .expect("Test failed"); @@ -1431,13 +1376,13 @@ mod tests { wrapped_erc20s::nut(asset) } }; - let prev_balance = wl_storage + let prev_balance = state .read(&balance_key( &erc20_token, &BRIDGE_POOL_ADDRESS, )) .expect("Test failed"); - let prev_supply = wl_storage + let prev_supply = state .read(&minted_balance_key(&erc20_token)) .expect("Test failed"); Some(Delta { @@ -1451,7 +1396,7 @@ mod tests { ) .collect::>(); - _ = act_on(wl_storage, event).unwrap(); + _ = act_on(state, event).unwrap(); for Delta { kind, @@ -1479,11 +1424,11 @@ mod tests { } }; - let balance: token::Amount = wl_storage + let balance: token::Amount = state .read(&balance_key(&erc20_token, &BRIDGE_POOL_ADDRESS)) .expect("Read must succeed") .expect("Balance must exist"); - let supply: token::Amount = wl_storage + let supply: token::Amount = state .read(&minted_balance_key(&erc20_token)) .expect("Read must succeed") .expect("Balance must exist"); @@ -1500,44 +1445,44 @@ mod tests { /// Namada and instead are kept in escrow, under the Ethereum bridge /// account. 
fn test_wrapped_nam_not_burned() { - test_wrapped_erc20s_aux(|wl_storage, event| { + test_wrapped_erc20s_aux(|state, event| { let native_erc20 = - read_native_erc20_address(wl_storage).expect("Test failed"); + read_native_erc20_address(state).expect("Test failed"); let wnam = wrapped_erc20s::token(&native_erc20); let escrow_balance_key = balance_key(&nam(), &BRIDGE_ADDRESS); // check pre supply assert!( - wl_storage + state .read_bytes(&balance_key(&wnam, &BRIDGE_POOL_ADDRESS)) .expect("Test failed") .is_none() ); assert!( - wl_storage + state .read_bytes(&minted_balance_key(&wnam)) .expect("Test failed") .is_none() ); // check pre balance - let pre_escrowed_balance: token::Amount = wl_storage + let pre_escrowed_balance: token::Amount = state .read(&escrow_balance_key) .expect("Read must succeed") .expect("Balance must exist"); - _ = act_on(wl_storage, event).unwrap(); + _ = act_on(state, event).unwrap(); // check post supply - the wNAM minted supply should increase // by the transferred amount assert!( - wl_storage + state .read_bytes(&balance_key(&wnam, &BRIDGE_POOL_ADDRESS)) .expect("Test failed") .is_none() ); assert_eq!( - wl_storage + state .read::(&minted_balance_key(&wnam)) .expect("Reading from storage should not fail") .expect("The wNAM supply should have been updated"), @@ -1545,7 +1490,7 @@ mod tests { ); // check post balance - let post_escrowed_balance: token::Amount = wl_storage + let post_escrowed_balance: token::Amount = state .read(&escrow_balance_key) .expect("Read must succeed") .expect("Balance must exist"); @@ -1560,8 +1505,8 @@ mod tests { #[test] #[should_panic(expected = "Attempted to mint wNAM NUTs!")] fn test_wnam_doesnt_mint_nuts() { - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); let transfer = PendingTransfer { transfer: eth_bridge_pool::TransferToEthereum { @@ -1578,6 +1523,6 @@ mod 
tests { }, }; - _ = update_transferred_asset_balances(&mut wl_storage, &transfer); + _ = update_transferred_asset_balances(&mut state, &transfer); } } diff --git a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs index b9b2fe2731..99dec94cb4 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs @@ -8,15 +8,15 @@ use std::collections::{BTreeSet, HashMap, HashSet}; use borsh::BorshDeserialize; use eth_msgs::EthMsgUpdate; use eyre::Result; -use namada_core::types::address::Address; -use namada_core::types::ethereum_events::EthereumEvent; -use namada_core::types::ethereum_structs::EthBridgeEvent; -use namada_core::types::key::common; -use namada_core::types::storage::{BlockHeight, Epoch, Key}; -use namada_core::types::token::Amount; +use namada_core::address::Address; +use namada_core::ethereum_events::EthereumEvent; +use namada_core::ethereum_structs::EthBridgeEvent; +use namada_core::key::common; +use namada_core::storage::{BlockHeight, Epoch, Key}; +use namada_core::token::Amount; use namada_proof_of_stake::pos_queries::PosQueries; use namada_state::tx_queue::ExpiredTx; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_tx::data::TxResult; use namada_vote_ext::ethereum_events::{MultiSignedEthEvent, SignedVext, Vext}; @@ -31,7 +31,7 @@ impl utils::GetVoters for &HashSet { #[inline] fn get_voters(self) -> HashSet<(Address, BlockHeight)> { self.iter().fold(HashSet::new(), |mut voters, update| { - voters.extend(update.seen_by.clone().into_iter()); + voters.extend(update.seen_by.clone()); voters }) } @@ -43,7 +43,7 @@ impl utils::GetVoters for &HashSet { /// __INVARIANT__: Assume `ethereum_events` are sorted in ascending /// order. 
pub fn sign_ethereum_events( - wl_storage: &WlStorage, + state: &WlState, validator_addr: &Address, protocol_key: &common::SecretKey, ethereum_events: Vec, @@ -52,12 +52,12 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - if !wl_storage.ethbridge_queries().is_bridge_active() { + if !state.ethbridge_queries().is_bridge_active() { return None; } let ext = Vext { - block_height: wl_storage.storage.get_last_block_height(), + block_height: state.in_mem().get_last_block_height(), validator_addr: validator_addr.clone(), ethereum_events, }; @@ -81,14 +81,14 @@ where /// This function is deterministic based on some existing blockchain state and /// the passed `events`. pub fn apply_derived_tx( - wl_storage: &mut WlStorage, + state: &mut WlState, events: Vec, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let mut changed_keys = timeout_events(wl_storage)?; + let mut changed_keys = timeout_events(state)?; if events.is_empty() { return Ok(TxResult { changed_keys, @@ -105,17 +105,17 @@ where .into_iter() .filter_map(|multisigned| { // NB: discard events with outdated nonces - wl_storage + state .ethbridge_queries() .validate_eth_event_nonce(&multisigned.event) .then(|| EthMsgUpdate::from(multisigned)) }) .collect(); - let voting_powers = utils::get_voting_powers(wl_storage, &updates)?; + let voting_powers = utils::get_voting_powers(state, &updates)?; let (mut apply_updates_keys, eth_bridge_events) = - apply_updates(wl_storage, updates, voting_powers)?; + apply_updates(state, updates, voting_powers)?; changed_keys.append(&mut apply_updates_keys); Ok(TxResult { @@ -131,7 +131,7 @@ where /// The `voting_powers` map must contain a voting power for all /// `(Address, BlockHeight)`s that occur in any of the `updates`. 
pub(super) fn apply_updates( - wl_storage: &mut WlStorage, + state: &mut WlState, updates: HashSet, voting_powers: HashMap<(Address, BlockHeight), Amount>, ) -> Result<(ChangedKeys, BTreeSet)> @@ -152,7 +152,7 @@ where // The order in which updates are applied to storage does not matter. // The final storage state will be the same regardless. let (mut changed, newly_confirmed) = - apply_update(wl_storage, update.clone(), &voting_powers)?; + apply_update(state, update.clone(), &voting_powers)?; changed_keys.append(&mut changed); if newly_confirmed { confirmed.push(update.body); @@ -167,8 +167,7 @@ where // Right now, the order in which events are acted on does not matter. // For `TransfersToNamada` events, they can happen in any order. for event in confirmed { - let (mut changed, mut new_tx_events) = - events::act_on(wl_storage, event)?; + let (mut changed, mut new_tx_events) = events::act_on(state, event)?; changed_keys.append(&mut changed); tx_events.append(&mut new_tx_events); } @@ -181,7 +180,7 @@ where /// The `voting_powers` map must contain a voting power for all /// `(Address, BlockHeight)`s that occur in `update`. fn apply_update( - wl_storage: &mut WlStorage, + state: &mut WlState, update: EthMsgUpdate, voting_powers: &HashMap<(Address, BlockHeight), Amount>, ) -> Result<(ChangedKeys, bool)> @@ -191,7 +190,7 @@ where { let eth_msg_keys = vote_tallies::Keys::from(&update.body); let exists_in_storage = if let Some(seen) = - votes::storage::maybe_read_seen(wl_storage, ð_msg_keys)? + votes::storage::maybe_read_seen(state, ð_msg_keys)? 
{ if seen { tracing::debug!(?update, "Ethereum event is already seen"); @@ -206,7 +205,7 @@ where if !exists_in_storage { tracing::debug!(%eth_msg_keys.prefix, "Ethereum event not seen before by any validator"); let vote_tracking = - calculate_new(wl_storage, update.seen_by, voting_powers)?; + calculate_new(state, update.seen_by, voting_powers)?; let changed = eth_msg_keys.into_iter().collect(); let confirmed = vote_tracking.seen; (vote_tracking, changed, confirmed, false) @@ -218,7 +217,7 @@ where let new_votes = NewVotes::new(update.seen_by.clone(), voting_powers)?; let (vote_tracking, changed) = - votes::update::calculate(wl_storage, ð_msg_keys, new_votes)?; + votes::update::calculate(state, ð_msg_keys, new_votes)?; if changed.is_empty() { return Ok((changed, false)); } @@ -228,7 +227,7 @@ where }; votes::storage::write( - wl_storage, + state, ð_msg_keys, &update.body, &vote_tracking, @@ -238,18 +237,18 @@ where Ok((changed, confirmed)) } -fn timeout_events(wl_storage: &mut WlStorage) -> Result +fn timeout_events(state: &mut WlState) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { let mut changed = ChangedKeys::new(); - for keys in get_timed_out_eth_events(wl_storage)? { + for keys in get_timed_out_eth_events(state)? { tracing::debug!( %keys.prefix, "Ethereum event timed out", ); - if let Some(event) = votes::storage::delete(wl_storage, &keys)? { + if let Some(event) = votes::storage::delete(state, &keys)? { tracing::debug!( %keys.prefix, "Queueing Ethereum event for retransmission", @@ -260,8 +259,8 @@ where // replaying ethereum events has no effect on the ledger. // however, we may need to revisit this code if we ever // implement slashing on double voting of ethereum events. 
- wl_storage - .storage + state + .in_mem_mut() .expired_txs_queue .push(ExpiredTx::EthereumEvent(event)); } @@ -272,14 +271,14 @@ where } fn get_timed_out_eth_events( - wl_storage: &mut WlStorage, + state: &mut WlState, ) -> Result>> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let unbonding_len = wl_storage.pos_queries().get_pos_params().unbonding_len; - let current_epoch = wl_storage.storage.last_epoch; + let unbonding_len = state.pos_queries().get_pos_params().unbonding_len; + let current_epoch = state.in_mem().last_epoch; if current_epoch.0 <= unbonding_len { return Ok(Vec::new()); } @@ -290,7 +289,7 @@ where let mut is_timed_out = false; let mut is_seen = false; let mut results = Vec::new(); - for (key, val, _) in votes::storage::iter_prefix(wl_storage, &prefix)? { + for (key, val, _) in votes::storage::iter_prefix(state, &prefix)? { let key = Key::parse(key).expect("The key should be parsable"); if let Some(keys) = vote_tallies::eth_event_keys(&key) { match &cur_keys { @@ -334,20 +333,14 @@ where #[cfg(test)] mod tests { - use std::collections::{BTreeSet, HashMap, HashSet}; - - use borsh::BorshDeserialize; - use namada_core::types::address; - use namada_core::types::ethereum_events::testing::{ + use namada_core::address; + use namada_core::ethereum_events::testing::{ arbitrary_amount, arbitrary_eth_address, arbitrary_nonce, arbitrary_single_transfer, DAI_ERC20_ETH_ADDRESS, }; - use namada_core::types::ethereum_events::{ - EthereumEvent, TransferToNamada, - }; - use namada_core::types::voting_power::FractionalVotingPower; - use namada_state::testing::TestWlStorage; - use namada_storage::mockdb::MockDBWriteBatch; + use namada_core::ethereum_events::TransferToNamada; + use namada_core::voting_power::FractionalVotingPower; + use namada_state::testing::TestState; use namada_storage::StorageRead; use super::*; @@ -393,9 +386,9 @@ mod tests { (sole_validator.clone(), BlockHeight(100)), validator_stake, )]); - let 
(mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); test_utils::whitelist_tokens( - &mut wl_storage, + &mut state, [( DAI_ERC20_ETH_ADDRESS, test_utils::WhitelistMeta { @@ -406,7 +399,7 @@ mod tests { ); let (changed_keys, _) = - apply_updates(&mut wl_storage, updates, voting_powers)?; + apply_updates(&mut state, updates, voting_powers)?; let eth_msg_keys: vote_tallies::Keys = (&body).into(); let wrapped_erc20_token = wrapped_erc20s::token(&asset); @@ -423,34 +416,34 @@ mod tests { changed_keys ); - let body_bytes = wl_storage.read_bytes(ð_msg_keys.body())?; + let body_bytes = state.read_bytes(ð_msg_keys.body())?; let body_bytes = body_bytes.unwrap(); assert_eq!(EthereumEvent::try_from_slice(&body_bytes)?, body); - let seen_bytes = wl_storage.read_bytes(ð_msg_keys.seen())?; + let seen_bytes = state.read_bytes(ð_msg_keys.seen())?; let seen_bytes = seen_bytes.unwrap(); assert!(bool::try_from_slice(&seen_bytes)?); - let seen_by_bytes = wl_storage.read_bytes(ð_msg_keys.seen_by())?; + let seen_by_bytes = state.read_bytes(ð_msg_keys.seen_by())?; let seen_by_bytes = seen_by_bytes.unwrap(); assert_eq!( Votes::try_from_slice(&seen_by_bytes)?, Votes::from([(sole_validator, BlockHeight(100))]) ); - let voting_power = wl_storage + let voting_power = state .read::(ð_msg_keys.voting_power())? 
.expect("Test failed") - .fractional_stake(&wl_storage); + .fractional_stake(&state); assert_eq!(voting_power, FractionalVotingPower::WHOLE); let epoch_bytes = - wl_storage.read_bytes(ð_msg_keys.voting_started_epoch())?; + state.read_bytes(ð_msg_keys.voting_started_epoch())?; let epoch_bytes = epoch_bytes.unwrap(); assert_eq!(Epoch::try_from_slice(&epoch_bytes)?, Epoch(0)); - let wrapped_erc20_balance_bytes = wl_storage - .read_bytes(&balance_key(&wrapped_erc20_token, &receiver))?; + let wrapped_erc20_balance_bytes = + state.read_bytes(&balance_key(&wrapped_erc20_token, &receiver))?; let wrapped_erc20_balance_bytes = wrapped_erc20_balance_bytes.unwrap(); assert_eq!( Amount::try_from_slice(&wrapped_erc20_balance_bytes)?, @@ -458,7 +451,7 @@ mod tests { ); let wrapped_erc20_supply_bytes = - wl_storage.read_bytes(&minted_balance_key(&wrapped_erc20_token))?; + state.read_bytes(&minted_balance_key(&wrapped_erc20_token))?; let wrapped_erc20_supply_bytes = wrapped_erc20_supply_bytes.unwrap(); assert_eq!( Amount::try_from_slice(&wrapped_erc20_supply_bytes)?, @@ -474,12 +467,12 @@ mod tests { /// that it is recorded in storage fn test_apply_derived_tx_new_event_mint_immediately() { let sole_validator = address::testing::established_address_2(); - let (mut wl_storage, _) = + let (mut state, _) = test_utils::setup_storage_with_validators(HashMap::from_iter( vec![(sole_validator.clone(), Amount::native_whole(100))], )); test_utils::whitelist_tokens( - &mut wl_storage, + &mut state, [( DAI_ERC20_ETH_ADDRESS, test_utils::WhitelistMeta { @@ -500,7 +493,7 @@ mod tests { }; let result = apply_derived_tx( - &mut wl_storage, + &mut state, vec![MultiSignedEthEvent { event: event.clone(), signers: BTreeSet::from([(sole_validator, BlockHeight(100))]), @@ -544,7 +537,7 @@ mod tests { fn test_apply_derived_tx_new_event_dont_mint() { let validator_a = address::testing::established_address_2(); let validator_b = address::testing::established_address_3(); - let (mut wl_storage, _) = 
test_utils::setup_storage_with_validators( + let (mut state, _) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ (validator_a.clone(), Amount::native_whole(100)), (validator_b, Amount::native_whole(100)), @@ -562,7 +555,7 @@ mod tests { }; let result = apply_derived_tx( - &mut wl_storage, + &mut state, vec![MultiSignedEthEvent { event: event.clone(), signers: BTreeSet::from([(validator_a, BlockHeight(100))]), @@ -596,7 +589,7 @@ mod tests { pub fn test_apply_derived_tx_duplicates() -> Result<()> { let validator_a = address::testing::established_address_2(); let validator_b = address::testing::established_address_3(); - let (mut wl_storage, _) = test_utils::setup_storage_with_validators( + let (mut state, _) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ (validator_a.clone(), Amount::native_whole(100)), (validator_b, Amount::native_whole(100)), @@ -620,7 +613,7 @@ mod tests { let multisigneds = vec![multisigned.clone(), multisigned]; - let result = apply_derived_tx(&mut wl_storage, multisigneds); + let result = apply_derived_tx(&mut state, multisigneds); let tx_result = match result { Ok(tx_result) => tx_result, Err(err) => panic!("unexpected error: {:#?}", err), @@ -639,17 +632,17 @@ mod tests { "One vote for the Ethereum event should have been recorded", ); - let seen_by_bytes = wl_storage.read_bytes(ð_msg_keys.seen_by())?; + let seen_by_bytes = state.read_bytes(ð_msg_keys.seen_by())?; let seen_by_bytes = seen_by_bytes.unwrap(); assert_eq!( Votes::try_from_slice(&seen_by_bytes)?, Votes::from([(validator_a, BlockHeight(100))]) ); - let voting_power = wl_storage + let voting_power = state .read::(ð_msg_keys.voting_power())? 
.expect("Test failed") - .fractional_stake(&wl_storage); + .fractional_stake(&state); assert_eq!(voting_power, FractionalVotingPower::HALF); Ok(()) @@ -717,7 +710,7 @@ mod tests { pub fn test_timeout_events() { let validator_a = address::testing::established_address_2(); let validator_b = address::testing::established_address_3(); - let (mut wl_storage, _) = test_utils::setup_storage_with_validators( + let (mut state, _) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ (validator_a.clone(), Amount::native_whole(100)), (validator_b, Amount::native_whole(100)), @@ -734,7 +727,7 @@ mod tests { }], }; let _result = apply_derived_tx( - &mut wl_storage, + &mut state, vec![MultiSignedEthEvent { event: event.clone(), signers: BTreeSet::from([( @@ -746,15 +739,15 @@ mod tests { let prev_keys = vote_tallies::Keys::from(&event); // commit then update the epoch - wl_storage.storage.commit_block(MockDBWriteBatch).unwrap(); + state.commit_block().unwrap(); let unbonding_len = - namada_proof_of_stake::storage::read_pos_params(&wl_storage) + namada_proof_of_stake::storage::read_pos_params(&state) .expect("Test failed") .unbonding_len + 1; - wl_storage.storage.last_epoch = - wl_storage.storage.last_epoch + unbonding_len; - wl_storage.storage.block.epoch = wl_storage.storage.last_epoch + 1_u64; + state.in_mem_mut().last_epoch = + state.in_mem().last_epoch + unbonding_len; + state.in_mem_mut().block.epoch = state.in_mem().last_epoch + 1_u64; let new_event = EthereumEvent::TransfersToNamada { nonce: 1.into(), @@ -765,7 +758,7 @@ mod tests { }], }; let result = apply_derived_tx( - &mut wl_storage, + &mut state, vec![MultiSignedEthEvent { event: new_event.clone(), signers: BTreeSet::from([(validator_a, BlockHeight(100))]), @@ -794,14 +787,14 @@ mod tests { "New event should be inserted and the previous one should be \ deleted", ); - assert!(wl_storage.read_bytes(&prev_keys.body()).unwrap().is_none()); - 
assert!(wl_storage.read_bytes(&new_keys.body()).unwrap().is_some()); + assert!(state.read_bytes(&prev_keys.body()).unwrap().is_none()); + assert!(state.read_bytes(&new_keys.body()).unwrap().is_some()); } /// Helper fn to [`test_timeout_events_before_state_upds`]. fn check_event_keys( keys: &Keys, - wl_storage: &TestWlStorage, + state: &TestState, result: Result, mut assert: F, ) where @@ -811,19 +804,16 @@ mod tests { Ok(tx_result) => tx_result, Err(err) => panic!("unexpected error: {:#?}", err), }; - assert(KeyKind::Body, wl_storage.read_bytes(&keys.body()).unwrap()); - assert(KeyKind::Seen, wl_storage.read_bytes(&keys.seen()).unwrap()); - assert( - KeyKind::SeenBy, - wl_storage.read_bytes(&keys.seen_by()).unwrap(), - ); + assert(KeyKind::Body, state.read_bytes(&keys.body()).unwrap()); + assert(KeyKind::Seen, state.read_bytes(&keys.seen()).unwrap()); + assert(KeyKind::SeenBy, state.read_bytes(&keys.seen_by()).unwrap()); assert( KeyKind::VotingPower, - wl_storage.read_bytes(&keys.voting_power()).unwrap(), + state.read_bytes(&keys.voting_power()).unwrap(), ); assert( KeyKind::Epoch, - wl_storage.read_bytes(&keys.voting_started_epoch()).unwrap(), + state.read_bytes(&keys.voting_started_epoch()).unwrap(), ); assert_eq!( tx_result.changed_keys, @@ -844,7 +834,7 @@ mod tests { fn test_timeout_events_before_state_upds() { let validator_a = address::testing::established_address_2(); let validator_b = address::testing::established_address_3(); - let (mut wl_storage, _) = test_utils::setup_storage_with_validators( + let (mut state, _) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ (validator_a.clone(), Amount::native_whole(100)), (validator_b.clone(), Amount::native_whole(100)), @@ -863,54 +853,54 @@ mod tests { let keys = vote_tallies::Keys::from(&event); let result = apply_derived_tx( - &mut wl_storage, + &mut state, vec![MultiSignedEthEvent { event: event.clone(), signers: BTreeSet::from([(validator_a, BlockHeight(100))]), }], ); - 
check_event_keys(&keys, &wl_storage, result, |key_kind, value| match ( - key_kind, value, - ) { - (_, None) => panic!("Test failed"), - (KeyKind::VotingPower, Some(power)) => { - let power = EpochedVotingPower::try_from_slice(&power) - .expect("Test failed") - .fractional_stake(&wl_storage); - assert_eq!(power, FractionalVotingPower::HALF); + check_event_keys(&keys, &state, result, |key_kind, value| { + match (key_kind, value) { + (_, None) => panic!("Test failed"), + (KeyKind::VotingPower, Some(power)) => { + let power = EpochedVotingPower::try_from_slice(&power) + .expect("Test failed") + .fractional_stake(&state); + assert_eq!(power, FractionalVotingPower::HALF); + } + (_, Some(_)) => {} } - (_, Some(_)) => {} }); // commit then update the epoch - wl_storage.storage.commit_block(MockDBWriteBatch).unwrap(); + state.commit_block().unwrap(); let unbonding_len = - namada_proof_of_stake::storage::read_pos_params(&wl_storage) + namada_proof_of_stake::storage::read_pos_params(&state) .expect("Test failed") .unbonding_len + 1; - wl_storage.storage.last_epoch = - wl_storage.storage.last_epoch + unbonding_len; - wl_storage.storage.block.epoch = wl_storage.storage.last_epoch + 1_u64; + state.in_mem_mut().last_epoch = + state.in_mem().last_epoch + unbonding_len; + state.in_mem_mut().block.epoch = state.in_mem().last_epoch + 1_u64; let result = apply_derived_tx( - &mut wl_storage, + &mut state, vec![MultiSignedEthEvent { event, signers: BTreeSet::from([(validator_b, BlockHeight(100))]), }], ); - check_event_keys(&keys, &wl_storage, result, |key_kind, value| match ( - key_kind, value, - ) { - (_, None) => panic!("Test failed"), - (KeyKind::VotingPower, Some(power)) => { - let power = EpochedVotingPower::try_from_slice(&power) - .expect("Test failed") - .fractional_stake(&wl_storage); - assert_eq!(power, FractionalVotingPower::HALF); + check_event_keys(&keys, &state, result, |key_kind, value| { + match (key_kind, value) { + (_, None) => panic!("Test failed"), + 
(KeyKind::VotingPower, Some(power)) => { + let power = EpochedVotingPower::try_from_slice(&power) + .expect("Test failed") + .fractional_stake(&state); + assert_eq!(power, FractionalVotingPower::HALF); + } + (_, Some(_)) => {} } - (_, Some(_)) => {} }); } @@ -918,7 +908,7 @@ mod tests { /// not result in votes in storage. #[test] fn test_apply_derived_tx_outdated_nonce() -> Result<()> { - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); let new_multisigned = |nonce: u64| { let (validator, _) = test_utils::default_validator(); @@ -943,7 +933,7 @@ mod tests { ($nonce:expr) => { let (multisigned, event) = new_multisigned($nonce); let tx_result = - apply_derived_tx(&mut wl_storage, vec![multisigned])?; + apply_derived_tx(&mut state, vec![multisigned])?; let eth_msg_keys = vote_tallies::Keys::from(&event); assert!( @@ -951,9 +941,7 @@ mod tests { "The Ethereum event should have been seen", ); assert_eq!( - wl_storage - .ethbridge_queries() - .get_next_nam_transfers_nonce(), + state.ethbridge_queries().get_next_nam_transfers_nonce(), ($nonce + 1).into(), "The transfers to Namada nonce should have been \ incremented", @@ -964,7 +952,7 @@ mod tests { ($nonce:expr) => { let (multisigned, event) = new_multisigned($nonce); let tx_result = - apply_derived_tx(&mut wl_storage, vec![multisigned])?; + apply_derived_tx(&mut state, vec![multisigned])?; let eth_msg_keys = vote_tallies::Keys::from(&event); assert!( @@ -972,9 +960,7 @@ mod tests { "The Ethereum event should have been ignored", ); assert_eq!( - wl_storage - .ethbridge_queries() - .get_next_nam_transfers_nonce(), + state.ethbridge_queries().get_next_nam_transfers_nonce(), NEXT_NONCE_TO_PROCESS.into(), "The transfers to Namada nonce should not have changed", ); diff --git a/crates/ethereum_bridge/src/protocol/transactions/mod.rs b/crates/ethereum_bridge/src/protocol/transactions/mod.rs index 52833e7790..5b249deb7d 100644 --- 
a/crates/ethereum_bridge/src/protocol/transactions/mod.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/mod.rs @@ -14,7 +14,7 @@ pub mod votes; use std::collections::BTreeSet; -use namada_core::types::storage; +use namada_core::storage; /// The keys changed while applying a protocol transaction. pub type ChangedKeys = BTreeSet; diff --git a/crates/ethereum_bridge/src/protocol/transactions/read.rs b/crates/ethereum_bridge/src/protocol/transactions/read.rs index c618b0335d..9d45d2cbe4 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/read.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/read.rs @@ -1,48 +1,47 @@ //! Helpers for reading from storage use borsh::BorshDeserialize; use eyre::{eyre, Result}; -use namada_core::types::storage; -use namada_core::types::token::Amount; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_core::storage; +use namada_core::token::Amount; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_storage::StorageRead; /// Returns the stored Amount, or 0 if not stored pub(super) fn amount_or_default( - wl_storage: &WlStorage, + state: &WlState, key: &storage::Key, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - Ok(maybe_value(wl_storage, key)?.unwrap_or_default()) + Ok(maybe_value(state, key)?.unwrap_or_default()) } /// Read some arbitrary value from storage, erroring if it's not found pub(super) fn value( - wl_storage: &WlStorage, + state: &WlState, key: &storage::Key, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - maybe_value(wl_storage, key)? - .ok_or_else(|| eyre!("no value found at {}", key)) + maybe_value(state, key)?.ok_or_else(|| eyre!("no value found at {}", key)) } /// Try to read some arbitrary value from storage, returning `None` if nothing /// is read. 
This will still error if there is data stored at `key` but it is /// not deserializable to `T`. pub(super) fn maybe_value( - wl_storage: &WlStorage, + state: &WlState, key: &storage::Key, ) -> Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let maybe_val = wl_storage.read_bytes(key)?; + let maybe_val = state.read_bytes(key)?; let bytes = match maybe_val { Some(bytes) => bytes, None => return Ok(None), @@ -54,16 +53,16 @@ where #[cfg(test)] mod tests { use assert_matches::assert_matches; - use namada_core::types::storage; - use namada_core::types::token::Amount; - use namada_state::testing::TestWlStorage; + use namada_core::storage; + use namada_core::token::Amount; + use namada_state::testing::TestState; use namada_storage::StorageWrite; use crate::protocol::transactions::read; #[test] fn test_amount_returns_zero_for_uninitialized_storage() { - let fake_storage = TestWlStorage::default(); + let fake_storage = TestState::default(); let amt = read::amount_or_default( &fake_storage, &storage::Key::parse("some arbitrary key with no stored value") @@ -77,7 +76,7 @@ mod tests { fn test_amount_returns_stored_amount() { let key = storage::Key::parse("some arbitrary key").unwrap(); let amount = Amount::from(1_000_000); - let mut fake_storage = TestWlStorage::default(); + let mut fake_storage = TestState::default(); fake_storage.write(&key, amount).unwrap(); let amt = read::amount_or_default(&fake_storage, &key).unwrap(); @@ -88,7 +87,7 @@ mod tests { fn test_amount_errors_if_not_amount() { let key = storage::Key::parse("some arbitrary key").unwrap(); let amount = "not an Amount type"; - let mut fake_storage = TestWlStorage::default(); + let mut fake_storage = TestState::default(); fake_storage.write(&key, amount).unwrap(); assert_matches!(read::amount_or_default(&fake_storage, &key), Err(_)); diff --git a/crates/ethereum_bridge/src/protocol/transactions/update.rs 
b/crates/ethereum_bridge/src/protocol/transactions/update.rs index ee258774a2..0940857958 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/update.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/update.rs @@ -1,15 +1,15 @@ //! Helpers for writing to storage use eyre::Result; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::hash::StorageHasher; -use namada_core::types::storage; -use namada_core::types::token::{Amount, AmountError}; -use namada_state::{DBIter, WlStorage, DB}; +use namada_core::hash::StorageHasher; +use namada_core::storage; +use namada_core::token::{Amount, AmountError}; +use namada_state::{DBIter, WlState, DB}; use namada_storage::StorageWrite; /// Reads the `Amount` from key, applies update then writes it back pub fn amount( - wl_storage: &mut WlStorage, + state: &mut WlState, key: &storage::Key, update: impl FnOnce(&mut Amount) -> Result<(), AmountError>, ) -> Result @@ -17,16 +17,16 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let mut amount = super::read::amount_or_default(wl_storage, key)?; + let mut amount = super::read::amount_or_default(state, key)?; update(&mut amount)?; - wl_storage.write(key, amount)?; + state.write(key, amount)?; Ok(amount) } #[allow(dead_code)] /// Reads an arbitrary value, applies update then writes it back pub fn value( - wl_storage: &mut WlStorage, + state: &mut WlState, key: &storage::Key, update: impl FnOnce(&mut T), ) -> Result @@ -34,18 +34,17 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let mut value = super::read::value(wl_storage, key)?; + let mut value = super::read::value(state, key)?; update(&mut value); - wl_storage.write(key, &value)?; + state.write(key, &value)?; Ok(value) } #[cfg(test)] mod tests { - use eyre::{eyre, Result}; - use namada_core::types::storage; - use namada_state::testing::TestWlStorage; - use namada_storage::{StorageRead, 
StorageWrite}; + use eyre::eyre; + use namada_state::testing::TestState; + use namada_storage::StorageRead; use super::*; @@ -55,14 +54,12 @@ mod tests { let key = storage::Key::parse("some arbitrary key") .expect("could not set up test"); let value = 21i32; - let mut wl_storage = TestWlStorage::default(); - wl_storage - .write(&key, value) - .expect("could not set up test"); + let mut state = TestState::default(); + state.write(&key, value).expect("could not set up test"); - super::value(&mut wl_storage, &key, |v: &mut i32| *v *= 2)?; + super::value(&mut state, &key, |v: &mut i32| *v *= 2)?; - let new_val = wl_storage.read_bytes(&key)?; + let new_val = state.read_bytes(&key)?; let new_val = match new_val { Some(new_val) => ::try_from_slice(&new_val)?, None => return Err(eyre!("no value found")), diff --git a/crates/ethereum_bridge/src/protocol/transactions/utils.rs b/crates/ethereum_bridge/src/protocol/transactions/utils.rs index 63eaf8530e..5f57f11849 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/utils.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/utils.rs @@ -2,12 +2,12 @@ use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use eyre::eyre; use itertools::Itertools; -use namada_core::types::address::Address; -use namada_core::types::storage::BlockHeight; -use namada_core::types::token; +use namada_core::address::Address; +use namada_core::storage::BlockHeight; +use namada_core::token; use namada_proof_of_stake::pos_queries::PosQueries; use namada_proof_of_stake::types::WeightedValidator; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; /// Proof of some arbitrary tally whose voters can be queried. pub(super) trait GetVoters { @@ -22,7 +22,7 @@ pub(super) trait GetVoters { /// which they signed some arbitrary object, and whose values are the voting /// powers of these validators at the key's given block height. 
pub(super) fn get_voting_powers( - wl_storage: &WlStorage, + state: &WlState, proof: P, ) -> eyre::Result> where @@ -34,7 +34,7 @@ where tracing::debug!(?voters, "Got validators who voted on at least one event"); let consensus_validators = get_consensus_validators( - wl_storage, + state, voters.iter().map(|(_, h)| h.to_owned()).collect(), ); tracing::debug!( @@ -55,7 +55,7 @@ where // TODO: we might be able to remove allocation here pub(super) fn get_consensus_validators( - wl_storage: &WlStorage, + state: &WlState, block_heights: HashSet, ) -> BTreeMap> where @@ -64,12 +64,12 @@ where { let mut consensus_validators = BTreeMap::default(); for height in block_heights.into_iter() { - let epoch = wl_storage.pos_queries().get_epoch(height).expect( + let epoch = state.pos_queries().get_epoch(height).expect( "The epoch of the last block height should always be known", ); _ = consensus_validators.insert( height, - wl_storage + state .pos_queries() .get_consensus_validators(Some(epoch)) .iter() @@ -120,12 +120,10 @@ pub(super) fn get_voting_powers_for_selected( #[cfg(test)] mod tests { - use std::collections::HashSet; - use assert_matches::assert_matches; - use namada_core::types::address; - use namada_core::types::ethereum_events::testing::arbitrary_bonded_stake; - use namada_core::types::voting_power::FractionalVotingPower; + use namada_core::address; + use namada_core::ethereum_events::testing::arbitrary_bonded_stake; + use namada_core::voting_power::FractionalVotingPower; use super::*; diff --git a/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs b/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs index 1adcf3d09b..13de4cd87a 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs @@ -3,11 +3,11 @@ use std::collections::{HashMap, HashSet}; use eyre::Result; -use namada_core::types::address::Address; 
-use namada_core::types::key::common; -use namada_core::types::storage::{BlockHeight, Epoch}; -use namada_core::types::token::Amount; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_core::address::Address; +use namada_core::key::common; +use namada_core::storage::{BlockHeight, Epoch}; +use namada_core::token::Amount; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_tx::data::TxResult; use namada_vote_ext::validator_set_update; @@ -35,7 +35,7 @@ impl utils::GetVoters for (&validator_set_update::VextDigest, BlockHeight) { /// Sign the next set of validators, and return the associated /// vote extension protocol transaction. pub fn sign_validator_set_update( - wl_storage: &WlStorage, + state: &WlState, validator_addr: &Address, eth_hot_key: &common::SecretKey, ) -> Option @@ -43,13 +43,13 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - wl_storage + state .ethbridge_queries() .must_send_valset_upd(SendValsetUpd::Now) .then(|| { - let next_epoch = wl_storage.storage.get_current_epoch().0.next(); + let next_epoch = state.in_mem().get_current_epoch().0.next(); - let voting_powers = wl_storage + let voting_powers = state .ethbridge_queries() .get_consensus_eth_addresses(Some(next_epoch)) .iter() @@ -61,7 +61,7 @@ where let ext = validator_set_update::Vext { voting_powers, validator_addr: validator_addr.clone(), - signing_epoch: wl_storage.storage.get_current_epoch().0, + signing_epoch: state.in_mem().get_current_epoch().0, }; ext.sign(eth_hot_key) @@ -69,7 +69,7 @@ where } pub fn aggregate_votes( - wl_storage: &mut WlStorage, + state: &mut WlState, ext: validator_set_update::VextDigest, signing_epoch: Epoch, ) -> Result @@ -87,8 +87,8 @@ where "Aggregating new votes for validator set update" ); - let epoch_2nd_height = wl_storage - .storage + let epoch_2nd_height = state + .in_mem() .block .pred_epochs .get_start_height_of_epoch(signing_epoch) @@ -100,9 +100,9 @@ where .expect("The 
first block height of the signing epoch should be known") + 1; let voting_powers = - utils::get_voting_powers(wl_storage, (&ext, epoch_2nd_height))?; + utils::get_voting_powers(state, (&ext, epoch_2nd_height))?; let changed_keys = apply_update( - wl_storage, + state, ext, signing_epoch, epoch_2nd_height, @@ -116,7 +116,7 @@ where } fn apply_update( - wl_storage: &mut WlStorage, + state: &mut WlState, ext: validator_set_update::VextDigest, signing_epoch: Epoch, epoch_2nd_height: BlockHeight, @@ -134,14 +134,16 @@ where }; let valset_upd_keys = vote_tallies::Keys::from(&next_epoch); let maybe_proof = 'check_storage: { - let Some(seen) = votes::storage::maybe_read_seen(wl_storage, &valset_upd_keys)? else { + let Some(seen) = + votes::storage::maybe_read_seen(state, &valset_upd_keys)? + else { break 'check_storage None; }; if seen { tracing::debug!("Validator set update tally is already seen"); return Ok(ChangedKeys::default()); } - let proof = votes::storage::read_body(wl_storage, &valset_upd_keys)?; + let proof = votes::storage::read_body(state, &valset_upd_keys)?; Some(proof) }; @@ -161,11 +163,8 @@ where "Validator set update votes already in storage", ); let new_votes = NewVotes::new(seen_by, &voting_powers)?; - let (tally, changed) = votes::update::calculate( - wl_storage, - &valset_upd_keys, - new_votes, - )?; + let (tally, changed) = + votes::update::calculate(state, &valset_upd_keys, new_votes)?; if changed.is_empty() { return Ok(changed); } @@ -174,7 +173,7 @@ where proof.attach_signature_batch(ext.signatures.into_iter().map( |(addr, sig)| { ( - wl_storage + state .ethbridge_queries() .get_eth_addr_book(&addr, Some(signing_epoch)) .expect("All validators should have eth keys"), @@ -189,13 +188,12 @@ where ?ext.voting_powers, "New validator set update vote aggregation started" ); - let tally = - votes::calculate_new(wl_storage, seen_by, &voting_powers)?; + let tally = votes::calculate_new(state, seen_by, &voting_powers)?; let mut proof = 
EthereumProof::new(ext.voting_powers); proof.attach_signature_batch(ext.signatures.into_iter().map( |(addr, sig)| { ( - wl_storage + state .ethbridge_queries() .get_eth_addr_book(&addr, Some(signing_epoch)) .expect("All validators should have eth keys"), @@ -214,7 +212,7 @@ where "Applying validator set update state changes" ); votes::storage::write( - wl_storage, + state, &valset_upd_keys, &proof, &tally, @@ -233,8 +231,8 @@ where #[cfg(test)] mod test_valset_upd_state_changes { - use namada_core::types::address; - use namada_core::types::voting_power::FractionalVotingPower; + use namada_core::address; + use namada_core::voting_power::FractionalVotingPower; use namada_proof_of_stake::pos_queries::PosQueries; use namada_vote_ext::validator_set_update::VotingPowersMap; @@ -245,16 +243,16 @@ mod test_valset_upd_state_changes { /// it should have a complete proof backing it up in storage. #[test] fn test_seen_has_complete_proof() { - let (mut wl_storage, keys) = test_utils::setup_default_storage(); + let (mut state, keys) = test_utils::setup_default_storage(); - let last_height = wl_storage.storage.get_last_block_height(); - let signing_epoch = wl_storage + let last_height = state.in_mem().get_last_block_height(); + let signing_epoch = state .pos_queries() .get_epoch(last_height) .expect("The epoch of the last block height should be known"); let tx_result = aggregate_votes( - &mut wl_storage, + &mut state, validator_set_update::VextDigest::singleton( validator_set_update::Vext { voting_powers: VotingPowersMap::new(), @@ -287,13 +285,13 @@ mod test_valset_upd_state_changes { ); // check if the valset upd is marked as "seen" - let tally = votes::storage::read(&wl_storage, &valset_upd_keys) + let tally = votes::storage::read(&state, &valset_upd_keys) .expect("Test failed"); assert!(tally.seen); // read the proof in storage and make sure its signature is // from the configured validator - let proof = votes::storage::read_body(&wl_storage, &valset_upd_keys) + let proof = 
votes::storage::read_body(&state, &valset_upd_keys) .expect("Test failed"); assert_eq!(proof.data, VotingPowersMap::new()); @@ -303,7 +301,7 @@ mod test_valset_upd_state_changes { let addr_book = proof_sigs.pop().expect("Test failed"); assert_eq!( addr_book, - wl_storage + state .ethbridge_queries() .get_eth_addr_book( &address::testing::established_address_1(), @@ -314,10 +312,10 @@ mod test_valset_upd_state_changes { // since only one validator is configured, we should // have reached a complete proof - let total_voting_power = wl_storage + let total_voting_power = state .pos_queries() .get_total_voting_power(Some(signing_epoch)); - let validator_voting_power = wl_storage + let validator_voting_power = state .pos_queries() .get_validator_from_address( &address::testing::established_address_1(), @@ -338,7 +336,7 @@ mod test_valset_upd_state_changes { /// it should never have a complete proof backing it up in storage. #[test] fn test_not_seen_has_incomplete_proof() { - let (mut wl_storage, keys) = + let (mut state, keys) = test_utils::setup_storage_with_validators(HashMap::from_iter([ // the first validator has exactly 2/3 of the total stake ( @@ -351,14 +349,14 @@ mod test_valset_upd_state_changes { ), ])); - let last_height = wl_storage.storage.get_last_block_height(); - let signing_epoch = wl_storage + let last_height = state.in_mem().get_last_block_height(); + let signing_epoch = state .pos_queries() .get_epoch(last_height) .expect("The epoch of the last block height should be known"); let tx_result = aggregate_votes( - &mut wl_storage, + &mut state, validator_set_update::VextDigest::singleton( validator_set_update::Vext { voting_powers: VotingPowersMap::new(), @@ -391,13 +389,13 @@ mod test_valset_upd_state_changes { ); // assert the validator set update is not "seen" yet - let tally = votes::storage::read(&wl_storage, &valset_upd_keys) + let tally = votes::storage::read(&state, &valset_upd_keys) .expect("Test failed"); assert!(!tally.seen); // read the proof 
in storage and make sure its signature is // from the configured validator - let proof = votes::storage::read_body(&wl_storage, &valset_upd_keys) + let proof = votes::storage::read_body(&state, &valset_upd_keys) .expect("Test failed"); assert_eq!(proof.data, VotingPowersMap::new()); @@ -407,7 +405,7 @@ mod test_valset_upd_state_changes { let addr_book = proof_sigs.pop().expect("Test failed"); assert_eq!( addr_book, - wl_storage + state .ethbridge_queries() .get_eth_addr_book( &address::testing::established_address_1(), @@ -417,10 +415,10 @@ mod test_valset_upd_state_changes { ); // make sure we do not have a complete proof yet - let total_voting_power = wl_storage + let total_voting_power = state .pos_queries() .get_total_voting_power(Some(signing_epoch)); - let validator_voting_power = wl_storage + let validator_voting_power = state .pos_queries() .get_validator_from_address( &address::testing::established_address_1(), diff --git a/crates/ethereum_bridge/src/protocol/transactions/votes.rs b/crates/ethereum_bridge/src/protocol/transactions/votes.rs index 7accb41f66..be8a097200 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/votes.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/votes.rs @@ -5,12 +5,12 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use eyre::{eyre, Result}; -use namada_core::types::address::Address; -use namada_core::types::storage::{BlockHeight, Epoch}; -use namada_core::types::token; -use namada_core::types::voting_power::FractionalVotingPower; +use namada_core::address::Address; +use namada_core::storage::{BlockHeight, Epoch}; +use namada_core::token; +use namada_core::voting_power::FractionalVotingPower; use namada_proof_of_stake::pos_queries::PosQueries; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use super::{read, ChangedKeys}; @@ -35,7 +35,7 @@ pub trait EpochedVotingPowerExt { /// 
the most staked tokens. fn epoch_max_voting_power( &self, - wl_storage: &WlStorage, + state: &WlState, ) -> Option where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -51,13 +51,13 @@ pub trait EpochedVotingPowerExt { #[inline] fn fractional_stake( &self, - wl_storage: &WlStorage, + state: &WlState, ) -> FractionalVotingPower where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let Some(max_voting_power) = self.epoch_max_voting_power(wl_storage) else { + let Some(max_voting_power) = self.epoch_max_voting_power(state) else { return FractionalVotingPower::NULL; }; FractionalVotingPower::new( @@ -70,12 +70,12 @@ pub trait EpochedVotingPowerExt { /// Check if the [`Tally`] associated with an [`EpochedVotingPower`] /// can be considered `seen`. #[inline] - fn has_majority_quorum(&self, wl_storage: &WlStorage) -> bool + fn has_majority_quorum(&self, state: &WlState) -> bool where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let Some(max_voting_power) = self.epoch_max_voting_power(wl_storage) else { + let Some(max_voting_power) = self.epoch_max_voting_power(state) else { return false; }; // NB: Preserve the safety property of the Tendermint protocol across @@ -96,7 +96,7 @@ pub trait EpochedVotingPowerExt { impl EpochedVotingPowerExt for EpochedVotingPower { fn epoch_max_voting_power( &self, - wl_storage: &WlStorage, + state: &WlState, ) -> Option where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -105,7 +105,7 @@ impl EpochedVotingPowerExt for EpochedVotingPower { self.keys() .copied() .map(|epoch| { - wl_storage.pos_queries().get_total_voting_power(Some(epoch)) + state.pos_queries().get_total_voting_power(Some(epoch)) }) .max() } @@ -136,7 +136,7 @@ pub struct Tally { /// Calculate a new [`Tally`] based on some validators' fractional voting powers /// as specific block heights pub fn calculate_new( - wl_storage: &WlStorage, + state: &WlState, seen_by: Votes, 
voting_powers: &HashMap<(Address, BlockHeight), token::Amount>, ) -> Result @@ -150,7 +150,7 @@ where .get(&(validator.to_owned(), block_height.to_owned())) { Some(&voting_power) => { - let epoch = wl_storage + let epoch = state .pos_queries() .get_epoch(*block_height) .expect("The queried epoch should be known"); @@ -168,7 +168,7 @@ where }; } - let newly_confirmed = seen_by_voting_power.has_majority_quorum(wl_storage); + let newly_confirmed = seen_by_voting_power.has_majority_quorum(state); Ok(Tally { voting_power: seen_by_voting_power, seen_by, @@ -185,10 +185,7 @@ pub fn dedupe(signers: BTreeSet<(Address, BlockHeight)>) -> Votes { #[cfg(test)] mod tests { - use std::collections::BTreeSet; - - use namada_core::types::storage::BlockHeight; - use namada_core::types::{address, token}; + use namada_core::address; use namada_proof_of_stake::parameters::OwnedPosParams; use namada_proof_of_stake::storage::write_pos_params; @@ -316,7 +313,7 @@ mod tests { validator_1_stake + validator_2_stake + validator_3_stake; // start epoch 0 with validator 1 - let (mut wl_storage, _) = test_utils::setup_storage_with_validators( + let (mut state, _) = test_utils::setup_storage_with_validators( HashMap::from([(validator_1.clone(), validator_1_stake)]), ); @@ -325,11 +322,11 @@ mod tests { pipeline_len: 1, ..Default::default() }; - write_pos_params(&mut wl_storage, ¶ms).expect("Test failed"); + write_pos_params(&mut state, ¶ms).expect("Test failed"); // insert validators 2 and 3 at epoch 1 test_utils::append_validators_to_storage( - &mut wl_storage, + &mut state, HashMap::from([ (validator_2.clone(), validator_2_stake), (validator_3.clone(), validator_3_stake), @@ -338,7 +335,7 @@ mod tests { // query validators to make sure they were inserted correctly let query_validators = |epoch: u64| { - wl_storage + state .pos_queries() .get_consensus_validators(Some(epoch.into())) .iter() @@ -352,9 +349,7 @@ mod tests { HashMap::from([(validator_1.clone(), validator_1_stake)]) ); assert_eq!( - 
wl_storage - .pos_queries() - .get_total_voting_power(Some(0.into())), + state.pos_queries().get_total_voting_power(Some(0.into())), validator_1_stake, ); assert_eq!( @@ -366,9 +361,7 @@ mod tests { ]) ); assert_eq!( - wl_storage - .pos_queries() - .get_total_voting_power(Some(1.into())), + state.pos_queries().get_total_voting_power(Some(1.into())), total_stake, ); @@ -378,7 +371,7 @@ mod tests { (1.into(), FractionalVotingPower::ONE_THIRD * total_stake), ]); assert_eq!( - aggregated.fractional_stake(&wl_storage), + aggregated.fractional_stake(&state), FractionalVotingPower::TWO_THIRDS ); } diff --git a/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs b/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs index 8830059b63..347af45ca0 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs @@ -1,16 +1,16 @@ use eyre::{Result, WrapErr}; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; use namada_core::hints; -use namada_core::types::storage::Key; -use namada_core::types::voting_power::FractionalVotingPower; -use namada_state::{DBIter, PrefixIter, StorageHasher, WlStorage, DB}; +use namada_core::storage::Key; +use namada_core::voting_power::FractionalVotingPower; +use namada_state::{DBIter, PrefixIter, StorageHasher, WlState, DB}; use namada_storage::{StorageRead, StorageWrite}; use super::{EpochedVotingPower, EpochedVotingPowerExt, Tally, Votes}; use crate::storage::vote_tallies; pub fn write( - wl_storage: &mut WlStorage, + state: &mut WlState, keys: &vote_tallies::Keys, body: &T, tally: &Tally, @@ -21,15 +21,15 @@ where H: 'static + StorageHasher + Sync, T: BorshSerialize, { - wl_storage.write(&keys.body(), body)?; - wl_storage.write(&keys.seen(), tally.seen)?; - wl_storage.write(&keys.seen_by(), tally.seen_by.clone())?; - wl_storage.write(&keys.voting_power(), tally.voting_power.clone())?; + state.write(&keys.body(), body)?; + 
state.write(&keys.seen(), tally.seen)?; + state.write(&keys.seen_by(), tally.seen_by.clone())?; + state.write(&keys.voting_power(), tally.voting_power.clone())?; if !already_present { // add the current epoch for the inserted event - wl_storage.write( + state.write( &keys.voting_started_epoch(), - wl_storage.storage.get_current_epoch().0, + state.in_mem().get_current_epoch().0, )?; } Ok(()) @@ -40,7 +40,7 @@ where /// of fractional voting power behind it. #[must_use = "The storage value returned by this function must be used"] pub fn delete( - wl_storage: &mut WlStorage, + state: &mut WlState, keys: &vote_tallies::Keys, ) -> Result> where @@ -50,38 +50,38 @@ where { let opt_body = { let voting_power: EpochedVotingPower = - super::read::value(wl_storage, &keys.voting_power())?; + super::read::value(state, &keys.voting_power())?; if hints::unlikely( - voting_power.fractional_stake(wl_storage) + voting_power.fractional_stake(state) > FractionalVotingPower::ONE_THIRD, ) { - let body: T = super::read::value(wl_storage, &keys.body())?; + let body: T = super::read::value(state, &keys.body())?; Some(body) } else { None } }; - wl_storage.delete(&keys.body())?; - wl_storage.delete(&keys.seen())?; - wl_storage.delete(&keys.seen_by())?; - wl_storage.delete(&keys.voting_power())?; - wl_storage.delete(&keys.voting_started_epoch())?; + state.delete(&keys.body())?; + state.delete(&keys.seen())?; + state.delete(&keys.seen_by())?; + state.delete(&keys.voting_power())?; + state.delete(&keys.voting_started_epoch())?; Ok(opt_body) } pub fn read( - wl_storage: &WlStorage, + state: &WlState, keys: &vote_tallies::Keys, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let seen: bool = super::read::value(wl_storage, &keys.seen())?; - let seen_by: Votes = super::read::value(wl_storage, &keys.seen_by())?; + let seen: bool = super::read::value(state, &keys.seen())?; + let seen_by: Votes = super::read::value(state, &keys.seen_by())?; 
let voting_power: EpochedVotingPower = - super::read::value(wl_storage, &keys.voting_power())?; + super::read::value(state, &keys.voting_power())?; Ok(Tally { voting_power, @@ -91,21 +91,21 @@ where } pub fn iter_prefix<'a, D, H>( - wl_storage: &'a WlStorage, + state: &'a WlState, prefix: &Key, ) -> Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - wl_storage + state .iter_prefix(prefix) .context("Failed to iterate over the given storage prefix") } #[inline] pub fn read_body( - wl_storage: &WlStorage, + state: &WlState, keys: &vote_tallies::Keys, ) -> Result where @@ -113,12 +113,12 @@ where H: 'static + StorageHasher + Sync, T: BorshDeserialize, { - super::read::value(wl_storage, &keys.body()) + super::read::value(state, &keys.body()) } #[inline] pub fn maybe_read_seen( - wl_storage: &WlStorage, + state: &WlState, keys: &vote_tallies::Keys, ) -> Result> where @@ -126,7 +126,7 @@ where H: 'static + StorageHasher + Sync, T: BorshDeserialize, { - super::read::maybe_value(wl_storage, &keys.seen()) + super::read::maybe_value(state, &keys.seen()) } #[cfg(test)] @@ -135,14 +135,14 @@ mod tests { use assert_matches::assert_matches; use namada_core::borsh::BorshSerializeExt; - use namada_core::types::ethereum_events::EthereumEvent; + use namada_core::ethereum_events::EthereumEvent; use super::*; use crate::test_utils; #[test] fn test_delete_expired_tally() { - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); let (validator, validator_voting_power) = test_utils::default_validator(); @@ -164,25 +164,25 @@ mod tests { seen_by: BTreeMap::from([(validator, 1.into())]), seen: false, }; - assert!(write(&mut wl_storage, &keys, &event, &tally, false).is_ok()); + assert!(write(&mut state, &keys, &event, &tally, false).is_ok()); // delete the tally and check that the body is returned - let opt_body = delete(&mut wl_storage, &keys).unwrap(); + let opt_body = 
delete(&mut state, &keys).unwrap(); assert_matches!(opt_body, Some(e) if e == event); // now, we write another tally, with <=1/3 voting power tally.voting_power = EpochedVotingPower::from([(0.into(), 1u64.into())]); - assert!(write(&mut wl_storage, &keys, &event, &tally, false).is_ok()); + assert!(write(&mut state, &keys, &event, &tally, false).is_ok()); // delete the tally and check that no body is returned - let opt_body = delete(&mut wl_storage, &keys).unwrap(); + let opt_body = delete(&mut state, &keys).unwrap(); assert_matches!(opt_body, None); } #[test] fn test_write_tally() { - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); let (validator, validator_voting_power) = test_utils::default_validator(); let event = EthereumEvent::TransfersToNamada { @@ -199,28 +199,27 @@ mod tests { seen: false, }; - let result = write(&mut wl_storage, &keys, &event, &tally, false); + let result = write(&mut state, &keys, &event, &tally, false); assert!(result.is_ok()); - let body = wl_storage.read_bytes(&keys.body()).unwrap(); + let body = state.read_bytes(&keys.body()).unwrap(); assert_eq!(body, Some(event.serialize_to_vec())); - let seen = wl_storage.read_bytes(&keys.seen()).unwrap(); + let seen = state.read_bytes(&keys.seen()).unwrap(); assert_eq!(seen, Some(tally.seen.serialize_to_vec())); - let seen_by = wl_storage.read_bytes(&keys.seen_by()).unwrap(); + let seen_by = state.read_bytes(&keys.seen_by()).unwrap(); assert_eq!(seen_by, Some(tally.seen_by.serialize_to_vec())); - let voting_power = wl_storage.read_bytes(&keys.voting_power()).unwrap(); + let voting_power = state.read_bytes(&keys.voting_power()).unwrap(); assert_eq!(voting_power, Some(tally.voting_power.serialize_to_vec())); - let epoch = - wl_storage.read_bytes(&keys.voting_started_epoch()).unwrap(); + let epoch = state.read_bytes(&keys.voting_started_epoch()).unwrap(); assert_eq!( epoch, - 
Some(wl_storage.storage.get_current_epoch().0.serialize_to_vec()) + Some(state.in_mem().get_current_epoch().0.serialize_to_vec()) ); } #[test] fn test_read_tally() { - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); let (validator, validator_voting_power) = test_utils::default_validator(); let event = EthereumEvent::TransfersToNamada { @@ -236,20 +235,20 @@ mod tests { seen_by: BTreeMap::from([(validator, 10.into())]), seen: false, }; - wl_storage.write(&keys.body(), &event).unwrap(); - wl_storage.write(&keys.seen(), tally.seen).unwrap(); - wl_storage.write(&keys.seen_by(), &tally.seen_by).unwrap(); - wl_storage + state.write(&keys.body(), &event).unwrap(); + state.write(&keys.seen(), tally.seen).unwrap(); + state.write(&keys.seen_by(), &tally.seen_by).unwrap(); + state .write(&keys.voting_power(), &tally.voting_power) .unwrap(); - wl_storage + state .write( &keys.voting_started_epoch(), - wl_storage.storage.get_block_height().0, + state.in_mem().get_block_height().0, ) .unwrap(); - let result = read(&wl_storage, &keys); + let result = read(&state, &keys); assert!(result.is_ok()); assert_eq!(result.unwrap(), tally); diff --git a/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs b/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs index 47027cde41..d2dc8c9dc5 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs @@ -2,11 +2,11 @@ use std::collections::{BTreeSet, HashMap, HashSet}; use borsh::BorshDeserialize; use eyre::{eyre, Result}; -use namada_core::types::address::Address; -use namada_core::types::storage::BlockHeight; -use namada_core::types::token; +use namada_core::address::Address; +use namada_core::storage::BlockHeight; +use namada_core::token; use namada_proof_of_stake::pos_queries::PosQueries; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use 
namada_state::{DBIter, StorageHasher, WlState, DB}; use super::{ChangedKeys, EpochedVotingPowerExt, Tally, Votes}; use crate::storage::vote_tallies; @@ -90,7 +90,7 @@ impl IntoIterator for NewVotes { /// votes from `vote_info` should be applied, and the returned changed keys will /// be empty. pub(in super::super) fn calculate( - wl_storage: &mut WlStorage, + state: &mut WlState, keys: &vote_tallies::Keys, vote_info: NewVotes, ) -> Result<(Tally, ChangedKeys)> @@ -104,7 +104,7 @@ where validators = ?vote_info.voters(), "Calculating validators' votes applied to an existing tally" ); - let tally_pre = super::storage::read(wl_storage, keys)?; + let tally_pre = super::storage::read(state, keys)?; if tally_pre.seen { return Ok((tally_pre, ChangedKeys::default())); } @@ -118,7 +118,7 @@ where "Ignoring duplicate voter" ); } - let tally_post = apply(wl_storage, &tally_pre, vote_info) + let tally_post = apply(state, &tally_pre, vote_info) .expect("We deduplicated voters already, so this should never error"); let changed_keys = keys_changed(keys, &tally_pre, &tally_post); @@ -147,7 +147,7 @@ where /// voters from `vote_info`. An error is returned if any validator which /// previously voted is present in `vote_info`. 
fn apply( - wl_storage: &WlStorage, + state: &WlState, tally: &Tally, vote_info: NewVotes, ) -> Result @@ -167,7 +167,7 @@ where {already_voted_height}", )); }; - let epoch = wl_storage + let epoch = state .pos_queries() .get_epoch(vote_height) .expect("The queried epoch should be known"); @@ -177,7 +177,7 @@ where *aggregated += voting_power; } - let seen_post = voting_power_post.has_majority_quorum(wl_storage); + let seen_post = voting_power_post.has_majority_quorum(state); Ok(Tally { voting_power: voting_power_post, @@ -209,10 +209,10 @@ fn keys_changed( mod tests { use std::collections::BTreeMap; - use namada_core::types::address; - use namada_core::types::ethereum_events::EthereumEvent; - use namada_core::types::voting_power::FractionalVotingPower; - use namada_state::testing::TestWlStorage; + use namada_core::address; + use namada_core::ethereum_events::EthereumEvent; + use namada_core::voting_power::FractionalVotingPower; + use namada_state::testing::TestState; use self::helpers::{default_event, default_total_stake, TallyParams}; use super::*; @@ -242,7 +242,7 @@ mod tests { /// Parameters to construct a test [`Tally`]. pub(super) struct TallyParams<'a> { /// Handle to storage. - pub wl_storage: &'a mut TestWlStorage, + pub state: &'a mut TestState, /// The event to be voted on. pub event: &'a EthereumEvent, /// Votes from the given validators at the given block height. @@ -258,7 +258,7 @@ mod tests { /// Write an initial [`Tally`] to storage. 
pub(super) fn setup(self) -> Result { let Self { - wl_storage, + state, event, votes, total_stake, @@ -278,9 +278,9 @@ mod tests { seen: seen_voting_power > FractionalVotingPower::TWO_THIRDS * total_stake, }; - votes::storage::write(wl_storage, &keys, event, &tally, false)?; + votes::storage::write(state, &keys, event, &tally, false)?; total_consensus_stake_handle().set( - wl_storage, + state, total_stake, 0u64.into(), 0, @@ -376,8 +376,8 @@ mod tests { #[test] fn test_apply_duplicate_votes() -> Result<()> { - let mut wl_storage = TestWlStorage::default(); - test_utils::init_default_storage(&mut wl_storage); + let mut state = TestState::default(); + test_utils::init_default_storage(&mut state); let validator = address::testing::established_address_1(); let already_voted_height = BlockHeight(100); @@ -385,7 +385,7 @@ mod tests { let event = default_event(); let tally_pre = TallyParams { total_stake: default_total_stake(), - wl_storage: &mut wl_storage, + state: &mut state, event: &event, votes: HashSet::from([( validator.clone(), @@ -402,7 +402,7 @@ mod tests { )]); let vote_info = NewVotes::new(votes, &voting_powers)?; - let result = apply(&wl_storage, &tally_pre, vote_info); + let result = apply(&state, &tally_pre, vote_info); assert!(result.is_err()); Ok(()) @@ -412,13 +412,13 @@ mod tests { /// already recorded as having been seen. 
#[test] fn test_calculate_already_seen() -> Result<()> { - let mut wl_storage = TestWlStorage::default(); - test_utils::init_default_storage(&mut wl_storage); + let mut state = TestState::default(); + test_utils::init_default_storage(&mut state); let event = default_event(); let keys = vote_tallies::Keys::from(&event); let tally_pre = TallyParams { total_stake: default_total_stake(), - wl_storage: &mut wl_storage, + state: &mut state, event: &event, votes: HashSet::from([( address::testing::established_address_1(), @@ -439,7 +439,7 @@ mod tests { let vote_info = NewVotes::new(votes, &voting_powers)?; let (tally_post, changed_keys) = - calculate(&mut wl_storage, &keys, vote_info)?; + calculate(&mut state, &keys, vote_info)?; assert_eq!(tally_post, tally_pre); assert!(changed_keys.is_empty()); @@ -449,12 +449,12 @@ mod tests { /// Tests that an unchanged tally is returned if no votes are passed. #[test] fn test_calculate_empty() -> Result<()> { - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); let event = default_event(); let keys = vote_tallies::Keys::from(&event); let tally_pre = TallyParams { total_stake: default_total_stake(), - wl_storage: &mut wl_storage, + state: &mut state, event: &event, votes: HashSet::from([( address::testing::established_address_1(), @@ -466,7 +466,7 @@ mod tests { let vote_info = NewVotes::new(Votes::default(), &HashMap::default())?; let (tally_post, changed_keys) = - calculate(&mut wl_storage, &keys, vote_info)?; + calculate(&mut state, &keys, vote_info)?; assert_eq!(tally_post, tally_pre); assert!(changed_keys.is_empty()); @@ -477,13 +477,13 @@ mod tests { /// not yet seen. 
#[test] fn test_calculate_one_vote_not_seen() -> Result<()> { - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); let event = default_event(); let keys = vote_tallies::Keys::from(&event); let _tally_pre = TallyParams { total_stake: default_total_stake(), - wl_storage: &mut wl_storage, + state: &mut state, event: &event, votes: HashSet::from([( address::testing::established_address_1(), @@ -503,7 +503,7 @@ mod tests { let vote_info = NewVotes::new(votes, &voting_powers)?; let (tally_post, changed_keys) = - calculate(&mut wl_storage, &keys, vote_info)?; + calculate(&mut state, &keys, vote_info)?; assert_eq!( tally_post, @@ -529,7 +529,7 @@ mod tests { /// seen. #[test] fn test_calculate_one_vote_seen() -> Result<()> { - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); let first_vote_stake = FractionalVotingPower::ONE_THIRD * default_total_stake(); @@ -541,7 +541,7 @@ mod tests { let keys = vote_tallies::Keys::from(&event); let _tally_pre = TallyParams { total_stake, - wl_storage: &mut wl_storage, + state: &mut state, event: &event, votes: HashSet::from([( address::testing::established_address_1(), @@ -559,7 +559,7 @@ mod tests { let vote_info = NewVotes::new(votes, &voting_powers)?; let (tally_post, changed_keys) = - calculate(&mut wl_storage, &keys, vote_info)?; + calculate(&mut state, &keys, vote_info)?; assert_eq!( tally_post, diff --git a/crates/ethereum_bridge/src/protocol/validation/bridge_pool_roots.rs b/crates/ethereum_bridge/src/protocol/validation/bridge_pool_roots.rs index e178a1fa98..da54631656 100644 --- a/crates/ethereum_bridge/src/protocol/validation/bridge_pool_roots.rs +++ b/crates/ethereum_bridge/src/protocol/validation/bridge_pool_roots.rs @@ -1,9 +1,9 @@ //! Bridge pool roots validation. 
-use namada_core::types::keccak::keccak_hash; -use namada_core::types::storage::BlockHeight; +use namada_core::keccak::keccak_hash; +use namada_core::storage::BlockHeight; use namada_proof_of_stake::pos_queries::PosQueries; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_tx::{SignableEthMessage, Signed}; use namada_vote_ext::bridge_pool_roots; @@ -21,7 +21,7 @@ use crate::storage::eth_bridge_queries::EthBridgeQueries; /// * The validator signed over the correct height inside of the extension. /// * Check that the inner signature is valid. pub fn validate_bp_roots_vext( - wl_storage: &WlStorage, + state: &WlState, ext: &Signed, last_height: BlockHeight, ) -> Result<(), VoteExtensionError> @@ -32,7 +32,7 @@ where // NOTE: for ABCI++, we should pass // `last_height` here, instead of `ext.data.block_height` let ext_height_epoch = - match wl_storage.pos_queries().get_epoch(ext.data.block_height) { + match state.pos_queries().get_epoch(ext.data.block_height) { Some(epoch) => epoch, _ => { tracing::debug!( @@ -43,7 +43,7 @@ where return Err(VoteExtensionError::UnexpectedEpoch); } }; - if !wl_storage + if !state .ethbridge_queries() .is_bridge_active_at(ext_height_epoch) { @@ -71,7 +71,7 @@ where // get the public key associated with this validator let validator = &ext.data.validator_addr; - let (_, pk) = wl_storage + let (_, pk) = state .pos_queries() .get_validator_from_address(validator, Some(ext_height_epoch)) .map_err(|err| { @@ -96,12 +96,12 @@ where VoteExtensionError::VerifySigFailed })?; - let bp_root = wl_storage + let bp_root = state .ethbridge_queries() .get_bridge_pool_root_at_height(ext.data.block_height) .expect("We asserted that the queried height is correct") .0; - let nonce = wl_storage + let nonce = state .ethbridge_queries() .get_bridge_pool_nonce_at_height(ext.data.block_height) .to_bytes(); @@ -109,7 +109,7 @@ where keccak_hash([bp_root, nonce].concat()), 
ext.data.sig.clone(), ); - let pk = wl_storage + let pk = state .pos_queries() .read_validator_eth_hot_key(validator, Some(ext_height_epoch)) .expect("A validator should have an Ethereum hot key in storage."); diff --git a/crates/ethereum_bridge/src/protocol/validation/ethereum_events.rs b/crates/ethereum_bridge/src/protocol/validation/ethereum_events.rs index a71d743c35..1ad96a2876 100644 --- a/crates/ethereum_bridge/src/protocol/validation/ethereum_events.rs +++ b/crates/ethereum_bridge/src/protocol/validation/ethereum_events.rs @@ -1,8 +1,8 @@ //! Ethereum events validation. -use namada_core::types::storage::BlockHeight; +use namada_core::storage::BlockHeight; use namada_proof_of_stake::pos_queries::PosQueries; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_tx::Signed; use namada_vote_ext::ethereum_events; @@ -19,7 +19,7 @@ use crate::storage::eth_bridge_queries::EthBridgeQueries; /// * There are no duplicate Ethereum events in this vote extension, and the /// events are sorted in ascending order. 
pub fn validate_eth_events_vext( - wl_storage: &WlStorage, + state: &WlState, ext: &Signed, last_height: BlockHeight, ) -> Result<(), VoteExtensionError> @@ -30,7 +30,7 @@ where // NOTE: for ABCI++, we should pass // `last_height` here, instead of `ext.data.block_height` let ext_height_epoch = - match wl_storage.pos_queries().get_epoch(ext.data.block_height) { + match state.pos_queries().get_epoch(ext.data.block_height) { Some(epoch) => epoch, _ => { tracing::debug!( @@ -41,7 +41,7 @@ where return Err(VoteExtensionError::UnexpectedEpoch); } }; - if !wl_storage + if !state .ethbridge_queries() .is_bridge_active_at(ext_height_epoch) { @@ -65,10 +65,10 @@ where tracing::debug!("Dropping vote extension issued at genesis"); return Err(VoteExtensionError::UnexpectedBlockHeight); } - validate_eth_events(wl_storage, &ext.data)?; + validate_eth_events(state, &ext.data)?; // get the public key associated with this validator let validator = &ext.data.validator_addr; - let (_, pk) = wl_storage + let (_, pk) = state .pos_queries() .get_validator_from_address(validator, Some(ext_height_epoch)) .map_err(|err| { @@ -102,7 +102,7 @@ where /// ascending ordering, must not contain any dupes /// and must have valid nonces. 
fn validate_eth_events( - wl_storage: &WlStorage, + state: &WlState, ext: ðereum_events::Vext, ) -> Result<(), VoteExtensionError> where @@ -128,11 +128,11 @@ where } // for the proposal to be valid, at least one of the // event's nonces must be valid - if ext.ethereum_events.iter().any(|event| { - wl_storage - .ethbridge_queries() - .validate_eth_event_nonce(event) - }) { + if ext + .ethereum_events + .iter() + .any(|event| state.ethbridge_queries().validate_eth_event_nonce(event)) + { Ok(()) } else { Err(VoteExtensionError::InvalidEthEventNonce) diff --git a/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs b/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs index 37cedea65a..a63326ab4c 100644 --- a/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs +++ b/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs @@ -1,8 +1,8 @@ //! Validator set update validation. -use namada_core::types::storage::Epoch; +use namada_core::storage::Epoch; use namada_proof_of_stake::pos_queries::PosQueries; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_vote_ext::validator_set_update; use super::VoteExtensionError; @@ -27,7 +27,7 @@ use crate::storage::eth_bridge_queries::EthBridgeQueries; /// * The voting powers signed over were Ethereum ABI encoded, normalized to /// `2^32`, and sorted in descending order. 
pub fn validate_valset_upd_vext( - wl_storage: &WlStorage, + state: &WlState, ext: &validator_set_update::SignedVext, last_epoch: Epoch, ) -> Result<(), VoteExtensionError> @@ -35,7 +35,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, { - if wl_storage.storage.last_block.is_none() { + if state.in_mem().last_block.is_none() { tracing::debug!( "Dropping validator set update vote extension issued at genesis" ); @@ -51,7 +51,7 @@ where ); return Err(VoteExtensionError::UnexpectedEpoch); } - if wl_storage + if state .ethbridge_queries() .valset_upd_seen(signing_epoch.next()) { @@ -65,7 +65,7 @@ where // verify if the new epoch validators' voting powers in storage match // the voting powers in the vote extension let mut no_local_consensus_eth_addresses = 0; - for (eth_addr_book, namada_addr, namada_power) in wl_storage + for (eth_addr_book, namada_addr, namada_power) in state .ethbridge_queries() .get_consensus_eth_addresses(Some(signing_epoch.next())) .iter() @@ -103,7 +103,7 @@ where } // get the public key associated with this validator let validator = &ext.data.validator_addr; - let pk = wl_storage + let pk = state .pos_queries() .read_validator_eth_hot_key(validator, Some(signing_epoch)) .ok_or_else(|| { @@ -132,8 +132,8 @@ where #[cfg(test)] mod tests { use assert_matches::assert_matches; - use namada_core::types::ethereum_events::EthAddress; - use namada_core::types::key::{common, RefTo}; + use namada_core::ethereum_events::EthAddress; + use namada_core::key::{common, RefTo}; use namada_vote_ext::validator_set_update::{EthAddrBook, VotingPowersMap}; use super::*; @@ -143,7 +143,7 @@ mod tests { /// next validator set in storage. 
#[test] fn test_superset_valsetupd_rejected() { - let (wl_storage, keys) = test_utils::setup_default_storage(); + let (state, keys) = test_utils::setup_default_storage(); let (validator, validator_stake) = test_utils::default_validator(); let hot_key_addr = { @@ -194,7 +194,7 @@ mod tests { } .sign(&keys.get(&validator).expect("Test failed").eth_bridge); - let result = validate_valset_upd_vext(&wl_storage, &ext, 0.into()); + let result = validate_valset_upd_vext(&state, &ext, 0.into()); assert_matches!( result, Err(VoteExtensionError::ExtraValidatorsInExtension) diff --git a/crates/ethereum_bridge/src/storage/bridge_pool.rs b/crates/ethereum_bridge/src/storage/bridge_pool.rs index 9fe34e7611..1ef4870df8 100644 --- a/crates/ethereum_bridge/src/storage/bridge_pool.rs +++ b/crates/ethereum_bridge/src/storage/bridge_pool.rs @@ -1,12 +1,12 @@ //! Tools for accessing the storage subspaces of the Ethereum //! bridge pool -use namada_core::types::eth_bridge_pool::Segments; -pub use namada_core::types::eth_bridge_pool::{ +use namada_core::eth_bridge_pool::Segments; +pub use namada_core::eth_bridge_pool::{ get_key_from_hash, get_pending_key, is_pending_transfer_key, BRIDGE_POOL_ADDRESS, }; -use namada_core::types::storage::{DbKeySeg, Key}; +use namada_core::storage::{DbKeySeg, Key}; pub use namada_state::merkle_tree::eth_bridge_pool::BridgePoolTree; /// Get the storage key for the root of the Merkle tree diff --git a/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs b/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs index 44b38a751a..903cad06d6 100644 --- a/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs +++ b/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs @@ -1,22 +1,19 @@ use borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::hints; -use namada_core::types::address::Address; -use namada_core::types::eth_abi::Encode; -use namada_core::types::eth_bridge_pool::PendingTransfer; -use namada_core::types::ethereum_events::{ +use 
namada_core::address::Address; +use namada_core::eth_abi::Encode; +use namada_core::eth_bridge_pool::PendingTransfer; +use namada_core::ethereum_events::{ EthAddress, EthereumEvent, GetEventNonce, TransferToEthereum, Uint, }; -use namada_core::types::keccak::KeccakHash; -use namada_core::types::storage::{BlockHeight, Epoch, Key as StorageKey}; -use namada_core::types::token; -use namada_core::types::voting_power::{ - EthBridgeVotingPower, FractionalVotingPower, -}; +use namada_core::keccak::KeccakHash; +use namada_core::storage::{BlockHeight, Epoch, Key as StorageKey}; +use namada_core::voting_power::{EthBridgeVotingPower, FractionalVotingPower}; +use namada_core::{hints, token}; use namada_proof_of_stake::pos_queries::{ConsensusValidators, PosQueries}; use namada_proof_of_stake::storage::{ validator_eth_cold_key_handle, validator_eth_hot_key_handle, }; -use namada_state::{DBIter, StorageHasher, StoreType, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, StoreType, WlState, DB}; use namada_storage::StorageRead; use namada_vote_ext::validator_set_update::{ EthAddrBook, ValidatorSetArgs, VotingPowersMap, VotingPowersMapExt, @@ -85,7 +82,7 @@ pub trait EthBridgeQueries { fn ethbridge_queries(&self) -> EthBridgeQueriesHook<'_, Self::Storage>; } -impl EthBridgeQueries for WlStorage +impl EthBridgeQueries for WlState where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, @@ -94,39 +91,36 @@ where #[inline] fn ethbridge_queries(&self) -> EthBridgeQueriesHook<'_, Self> { - EthBridgeQueriesHook { wl_storage: self } + EthBridgeQueriesHook { state: self } } } /// A handle to [`EthBridgeQueries`]. /// -/// This type is a wrapper around a pointer to a -/// [`WlStorage`]. +/// This type is a wrapper around a pointer to a [`WlState`]. 
#[derive(Debug)] #[repr(transparent)] -pub struct EthBridgeQueriesHook<'db, DB> { - wl_storage: &'db DB, +pub struct EthBridgeQueriesHook<'db, S> { + state: &'db S, } -impl<'db, DB> Clone for EthBridgeQueriesHook<'db, DB> { +impl<'db, S> Clone for EthBridgeQueriesHook<'db, S> { fn clone(&self) -> Self { - Self { - wl_storage: self.wl_storage, - } + *self } } -impl<'db, DB> Copy for EthBridgeQueriesHook<'db, DB> {} +impl<'s, S> Copy for EthBridgeQueriesHook<'s, S> {} -impl<'db, D, H> EthBridgeQueriesHook<'db, WlStorage> +impl<'db, D, H> EthBridgeQueriesHook<'db, WlState> where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, { - /// Return a handle to the inner [`WlStorage`]. + /// Return a handle to the inner [`WlState`]. #[inline] - pub fn storage(self) -> &'db WlStorage { - self.wl_storage + pub fn state(self) -> &'db WlState { + self.state } /// Check if a validator set update proof is available for @@ -138,7 +132,7 @@ where ); } let valset_upd_keys = vote_tallies::Keys::from(&epoch); - self.wl_storage + self.state .read(&valset_upd_keys.seen()) .expect("Reading a value from storage should not fail") .unwrap_or(false) @@ -148,7 +142,7 @@ where /// scheduled to be enabled at a specified epoch. pub fn check_bridge_status(self) -> EthBridgeStatus { BorshDeserialize::try_from_slice( - self.wl_storage + self.state .read_bytes(&active_key()) .expect( "Reading the Ethereum bridge active key shouldn't fail.", @@ -163,7 +157,7 @@ where /// currently active. #[inline] pub fn is_bridge_active(self) -> bool { - self.is_bridge_active_at(self.wl_storage.storage.get_current_epoch().0) + self.is_bridge_active_at(self.state.in_mem().get_current_epoch().0) } /// Behaves exactly like [`Self::is_bridge_active`], but performs @@ -180,8 +174,8 @@ where /// Get the nonce of the next transfers to Namada event to be processed. 
pub fn get_next_nam_transfers_nonce(self) -> Uint { - self.wl_storage - .storage + self.state + .in_mem() .eth_events_queue .transfers_to_namada .get_event_nonce() @@ -192,12 +186,10 @@ where pub fn get_bridge_pool_nonce(self) -> Uint { Uint::try_from_slice( &self - .wl_storage - .storage - .read(&bridge_pool::get_nonce_key()) + .state + .read_bytes(&bridge_pool::get_nonce_key()) .expect("Reading Bridge pool nonce shouldn't fail.") - .0 - .expect("Reading Bridge pool nonce shouldn't fail."), + .expect("Bridge pool nonce must be present."), ) .expect("Deserializing the nonce from storage should not fail.") } @@ -206,13 +198,12 @@ where pub fn get_bridge_pool_nonce_at_height(self, height: BlockHeight) -> Uint { Uint::try_from_slice( &self - .wl_storage - .storage - .db + .state + .db() .read_subspace_val_with_height( &bridge_pool::get_nonce_key(), height, - self.wl_storage.storage.get_last_block_height(), + self.state.in_mem().get_last_block_height(), ) .expect("Reading signed Bridge pool nonce shouldn't fail.") .expect("Reading signed Bridge pool nonce shouldn't fail."), @@ -223,8 +214,8 @@ where /// Get the latest root of the Ethereum bridge /// pool Merkle tree. pub fn get_bridge_pool_root(self) -> KeccakHash { - self.wl_storage - .storage + self.state + .in_mem() .block .tree .sub_root(&StoreType::BridgePool) @@ -243,7 +234,7 @@ where pub fn get_signed_bridge_pool_root( self, ) -> Option<(BridgePoolRootProof, BlockHeight)> { - self.wl_storage + self.state .read_bytes(&bridge_pool::get_signed_root_key()) .expect("Reading signed Bridge pool root shouldn't fail.") .map(|bytes| { @@ -261,8 +252,7 @@ where height: BlockHeight, ) -> Option { let base_tree = self - .wl_storage - .storage + .state .get_merkle_tree(height, Some(StoreType::BridgePool)) .ok()?; Some(base_tree.sub_root(&StoreType::BridgePool).into()) @@ -279,7 +269,7 @@ where } else { // offset of 1 => are we at the 2nd // block within the epoch? 
- self.wl_storage.is_deciding_offset_within_epoch(1) + self.state.is_deciding_offset_within_epoch(1) } } @@ -291,11 +281,11 @@ where validator: &Address, epoch: Option, ) -> Option { - let epoch = epoch - .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); - let params = self.wl_storage.pos_queries().get_pos_params(); + let epoch = + epoch.unwrap_or_else(|| self.state.in_mem().get_current_epoch().0); + let params = self.state.pos_queries().get_pos_params(); validator_eth_hot_key_handle(validator) - .get(self.wl_storage, epoch, ¶ms) + .get(self.state, epoch, ¶ms) .expect("Should be able to read eth hot key from storage") .and_then(|ref pk| pk.try_into().ok()) } @@ -308,11 +298,11 @@ where validator: &Address, epoch: Option, ) -> Option { - let epoch = epoch - .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); - let params = self.wl_storage.pos_queries().get_pos_params(); + let epoch = + epoch.unwrap_or_else(|| self.state.in_mem().get_current_epoch().0); + let params = self.state.pos_queries().get_pos_params(); validator_eth_cold_key_handle(validator) - .get(self.wl_storage, epoch, ¶ms) + .get(self.state, epoch, ¶ms) .expect("Should be able to read eth cold key from storage") .and_then(|ref pk| pk.try_into().ok()) } @@ -341,14 +331,14 @@ where self, epoch: Option, ) -> ConsensusEthAddresses<'db, D, H> { - let epoch = epoch - .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); + let epoch = + epoch.unwrap_or_else(|| self.state.in_mem().get_current_epoch().0); let consensus_validators = self - .wl_storage + .state .pos_queries() .get_consensus_validators(Some(epoch)); ConsensusEthAddresses { - wl_storage: self.wl_storage, + state: self.state, consensus_validators, epoch, } @@ -364,8 +354,8 @@ where where F: FnMut(&EthAddrBook) -> EthAddress, { - let epoch = epoch - .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); + let epoch = + epoch.unwrap_or_else(|| self.state.in_mem().get_current_epoch().0); let 
voting_powers_map: VotingPowersMap = self .get_consensus_eth_addresses(Some(epoch)) @@ -374,7 +364,7 @@ where .collect(); let total_power = self - .wl_storage + .state .pos_queries() .get_total_voting_power(Some(epoch)) .into(); @@ -434,7 +424,7 @@ where } .into(); - self.wl_storage + self.state .read(&key) .expect("Reading from storage should not fail") .unwrap_or(false) @@ -451,7 +441,7 @@ where } .into(); - self.wl_storage + self.state .read(&key) .expect("Reading from storage should not fail") } @@ -470,7 +460,7 @@ where } .into(); - self.wl_storage + self.state .read(&key) .expect("Reading from storage should not fail") } @@ -481,8 +471,8 @@ where /// NUTs are minted when: /// /// 1. `token` is not whitelisted. - /// 2. `token` has exceeded the configured token caps, - /// after minting `amount_to_mint`. + /// 2. `token` has exceeded the configured token caps, after minting + /// `amount_to_mint`. pub fn get_eth_assets_to_mint( self, token: &EthAddress, @@ -529,7 +519,7 @@ where transfer: &TransferToEthereum, ) -> Option<(PendingTransfer, StorageKey)> { let pending_key = bridge_pool::get_key_from_hash(&transfer.keccak256()); - self.wl_storage + self.state .read(&pending_key) .expect("Reading from storage should not fail") .zip(Some(pending_key)) @@ -615,8 +605,8 @@ where H: 'static + StorageHasher, { epoch: Epoch, - wl_storage: &'db WlStorage, - consensus_validators: ConsensusValidators<'db, WlStorage>, + state: &'db WlState, + consensus_validators: ConsensusValidators<'db, WlState>, } impl<'db, D, H> ConsensusEthAddresses<'db, D, H> @@ -631,7 +621,7 @@ where ) -> impl Iterator + 'db { self.consensus_validators.iter().map(move |validator| { let eth_addr_book = self - .wl_storage + .state .ethbridge_queries() .get_eth_addr_book(&validator.address, Some(self.epoch)) .expect("All Namada validators should have Ethereum keys"); diff --git a/crates/ethereum_bridge/src/storage/mod.rs b/crates/ethereum_bridge/src/storage/mod.rs index 96c2b8dd84..37ecfa6f57 100644 --- 
a/crates/ethereum_bridge/src/storage/mod.rs +++ b/crates/ethereum_bridge/src/storage/mod.rs @@ -9,14 +9,15 @@ pub mod vp; pub mod whitelist; pub mod wrapped_erc20s; -use namada_core::ledger::eth_bridge::ADDRESS; -use namada_core::types::address::Address; -use namada_core::types::storage::{DbKeySeg, Key, KeySeg}; +use namada_core::address::Address; +use namada_core::storage::{DbKeySeg, Key, KeySeg}; pub use namada_parameters::native_erc20_key; use namada_parameters::storage::*; use namada_parameters::ADDRESS as PARAM_ADDRESS; use namada_trans_token::storage_key::balance_key; +use crate::ADDRESS; + /// Key prefix for the storage subspace pub fn prefix() -> Key { Key::from(ADDRESS.to_db_key()) @@ -45,7 +46,7 @@ pub fn has_eth_addr_segment(key: &Key) -> bool { /// Returns whether a key belongs to this account or not pub fn is_eth_bridge_key(nam_addr: &Address, key: &Key) -> bool { key == &escrow_key(nam_addr) - || matches!(key.segments.get(0), Some(first_segment) if first_segment == &ADDRESS.to_db_key()) + || matches!(key.segments.first(), Some(first_segment) if first_segment == &ADDRESS.to_db_key()) || wrapped_erc20s::has_erc20_segment(key) } @@ -67,9 +68,9 @@ pub fn bridge_contract_key() -> Key { #[cfg(test)] mod test { - use namada_core::types::address; - use namada_core::types::address::nam; - use namada_core::types::ethereum_events::testing::arbitrary_eth_address; + use namada_core::address; + use namada_core::address::testing::nam; + use namada_core::ethereum_events::testing::arbitrary_eth_address; use super::*; diff --git a/crates/ethereum_bridge/src/storage/parameters.rs b/crates/ethereum_bridge/src/storage/parameters.rs index ead8b612f4..c30321d1fa 100644 --- a/crates/ethereum_bridge/src/storage/parameters.rs +++ b/crates/ethereum_bridge/src/storage/parameters.rs @@ -3,11 +3,11 @@ use std::num::NonZeroU64; use eyre::{eyre, Result}; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::ethereum_events::EthAddress; -use 
namada_core::types::ethereum_structs; -use namada_core::types::storage::Key; -use namada_core::types::token::{DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_core::ethereum_events::EthAddress; +use namada_core::ethereum_structs; +use namada_core::storage::Key; +use namada_core::token::{DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_storage::{StorageRead, StorageWrite}; use serde::{Deserialize, Serialize}; @@ -167,7 +167,7 @@ impl EthereumBridgeParams { /// /// If these parameters are initialized, the storage subspaces /// for the Ethereum bridge VPs are also initialized. - pub fn init_storage(&self, wl_storage: &mut WlStorage) + pub fn init_storage(&self, state: &mut WlState) where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, @@ -187,18 +187,18 @@ impl EthereumBridgeParams { let native_erc20_key = bridge_storage::native_erc20_key(); let bridge_contract_key = bridge_storage::bridge_contract_key(); let eth_start_height_key = bridge_storage::eth_start_height_key(); - wl_storage + state .write( &active_key, EthBridgeStatus::Enabled(EthBridgeEnabled::AtGenesis), ) .unwrap(); - wl_storage + state .write(&min_confirmations_key, min_confirmations) .unwrap(); - wl_storage.write(&native_erc20_key, native_erc20).unwrap(); - wl_storage.write(&bridge_contract_key, bridge).unwrap(); - wl_storage + state.write(&native_erc20_key, native_erc20).unwrap(); + state.write(&bridge_contract_key, bridge).unwrap(); + state .write(ð_start_height_key, eth_start_height) .unwrap(); for Erc20WhitelistEntry { @@ -221,26 +221,26 @@ impl EthereumBridgeParams { suffix: whitelist::KeyType::Whitelisted, } .into(); - wl_storage.write(&key, true).unwrap(); + state.write(&key, true).unwrap(); let key = whitelist::Key { asset: *addr, suffix: whitelist::KeyType::Cap, } .into(); - wl_storage.write(&key, cap).unwrap(); + state.write(&key, 
cap).unwrap(); let key = whitelist::Key { asset: *addr, suffix: whitelist::KeyType::Denomination, } .into(); - wl_storage.write(&key, denom).unwrap(); + state.write(&key, denom).unwrap(); } // Initialize the storage for the Ethereum Bridge VP. - vp::ethereum_bridge::init_storage(wl_storage); + vp::ethereum_bridge::init_storage(state); // Initialize the storage for the Bridge Pool VP. - vp::bridge_pool::init_storage(wl_storage); + vp::bridge_pool::init_storage(state); } } @@ -279,7 +279,7 @@ impl EthereumOracleConfig { /// present, `None` will be returned - this could be the case if the bridge /// has not been bootstrapped yet. Panics if the storage appears to be /// corrupt. - pub fn read(wl_storage: &WlStorage) -> Option + pub fn read(state: &WlState) -> Option where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, @@ -288,10 +288,9 @@ impl EthereumOracleConfig { // should not panic, when the active status key has not been // written to; simply return bridge disabled instead let has_active_key = - wl_storage.has_key(&bridge_storage::active_key()).unwrap(); + state.has_key(&bridge_storage::active_key()).unwrap(); - if !has_active_key || !wl_storage.ethbridge_queries().is_bridge_active() - { + if !has_active_key || !state.ethbridge_queries().is_bridge_active() { return None; } @@ -302,11 +301,10 @@ impl EthereumOracleConfig { // These reads must succeed otherwise the storage is corrupt or a // read failed - let min_confirmations = - must_read_key(wl_storage, &min_confirmations_key); - let native_erc20 = must_read_key(wl_storage, &native_erc20_key); - let bridge_contract = must_read_key(wl_storage, &bridge_contract_key); - let eth_start_height = must_read_key(wl_storage, ð_start_height_key); + let min_confirmations = must_read_key(state, &min_confirmations_key); + let native_erc20 = must_read_key(state, &native_erc20_key); + let bridge_contract = must_read_key(state, &bridge_contract_key); + let eth_start_height = must_read_key(state, 
ð_start_height_key); Some(Self { eth_start_height, @@ -341,14 +339,14 @@ where /// Reads the value of `key` from `storage` and deserializes it, or panics /// otherwise. fn must_read_key( - wl_storage: &WlStorage, + state: &WlState, key: &Key, ) -> T where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, { - StorageRead::read::(wl_storage, key).map_or_else( + StorageRead::read::(state, key).map_or_else( |err| panic!("Could not read {key}: {err:?}"), |value| { value.unwrap_or_else(|| { @@ -363,9 +361,7 @@ where #[cfg(test)] mod tests { - use eyre::Result; - use namada_core::types::ethereum_events::EthAddress; - use namada_state::testing::TestWlStorage; + use namada_state::testing::TestState; use super::*; @@ -395,7 +391,7 @@ mod tests { #[test] fn test_ethereum_bridge_config_read_write_storage() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let config = EthereumBridgeParams { erc20_whitelist: vec![], eth_start_height: Default::default(), @@ -408,9 +404,9 @@ mod tests { }, }, }; - config.init_storage(&mut wl_storage); + config.init_storage(&mut state); - let read = EthereumOracleConfig::read(&wl_storage).unwrap(); + let read = EthereumOracleConfig::read(&state).unwrap(); let config = EthereumOracleConfig::from(config); assert_eq!(config, read); @@ -418,8 +414,8 @@ mod tests { #[test] fn test_ethereum_bridge_config_uninitialized() { - let wl_storage = TestWlStorage::default(); - let read = EthereumOracleConfig::read(&wl_storage); + let state = TestState::default(); + let read = EthereumOracleConfig::read(&state); assert!(read.is_none()); } @@ -427,7 +423,7 @@ mod tests { #[test] #[should_panic(expected = "Could not read")] fn test_ethereum_bridge_config_storage_corrupt() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let config = EthereumBridgeParams { erc20_whitelist: vec![], eth_start_height: Default::default(), @@ -440,14 +436,14 @@ mod tests { }, }, }; - 
config.init_storage(&mut wl_storage); + config.init_storage(&mut state); let min_confirmations_key = bridge_storage::min_confirmations_key(); - wl_storage + state .write_bytes(&min_confirmations_key, vec![42, 1, 2, 3, 4]) .unwrap(); // This should panic because the min_confirmations value is not valid - EthereumOracleConfig::read(&wl_storage); + EthereumOracleConfig::read(&state); } #[test] @@ -455,15 +451,15 @@ mod tests { expected = "Ethereum bridge appears to be only partially configured!" )] fn test_ethereum_bridge_config_storage_partially_configured() { - let mut wl_storage = TestWlStorage::default(); - wl_storage + let mut state = TestState::default(); + state .write( &bridge_storage::active_key(), EthBridgeStatus::Enabled(EthBridgeEnabled::AtGenesis), ) .unwrap(); // Write a valid min_confirmations value - wl_storage + state .write( &bridge_storage::min_confirmations_key(), MinimumConfirmations::default(), @@ -471,6 +467,6 @@ mod tests { .unwrap(); // This should panic as the other config values are not written - EthereumOracleConfig::read(&wl_storage); + EthereumOracleConfig::read(&state); } } diff --git a/crates/ethereum_bridge/src/storage/proof.rs b/crates/ethereum_bridge/src/storage/proof.rs index 92b377f70f..2f5895f363 100644 --- a/crates/ethereum_bridge/src/storage/proof.rs +++ b/crates/ethereum_bridge/src/storage/proof.rs @@ -4,12 +4,12 @@ use std::collections::HashMap; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use ethers::abi::Tokenizable; -use namada_core::types::eth_abi::Encode; -use namada_core::types::ethereum_events::Uint; -use namada_core::types::keccak::KeccakHash; -use namada_core::types::key::{common, secp256k1}; -use namada_core::types::storage::Epoch; -use namada_core::types::{eth_abi, ethereum_structs}; +use namada_core::eth_abi::Encode; +use namada_core::ethereum_events::Uint; +use namada_core::keccak::KeccakHash; +use namada_core::key::{common, secp256k1}; +use namada_core::storage::Epoch; +use namada_core::{eth_abi, 
ethereum_structs}; use namada_vote_ext::validator_set_update::{ valset_upd_toks_to_hashes, EthAddrBook, VotingPowersMap, VotingPowersMapExt, }; @@ -123,8 +123,8 @@ mod test_ethbridge_proofs { //! Test ethereum bridge proofs. use assert_matches::assert_matches; - use namada_core::types::ethereum_events::EthAddress; - use namada_core::types::key; + use namada_core::ethereum_events::EthAddress; + use namada_core::key; use namada_tx::Signed; use super::*; diff --git a/crates/ethereum_bridge/src/storage/vote_tallies.rs b/crates/ethereum_bridge/src/storage/vote_tallies.rs index 354edb9958..7ed38aa409 100644 --- a/crates/ethereum_bridge/src/storage/vote_tallies.rs +++ b/crates/ethereum_bridge/src/storage/vote_tallies.rs @@ -4,16 +4,16 @@ use std::io::{Read, Write}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::ledger::eth_bridge::ADDRESS; -use namada_core::types::address::Address; -use namada_core::types::ethereum_events::{EthereumEvent, Uint}; -use namada_core::types::hash::Hash; -use namada_core::types::keccak::{keccak_hash, KeccakHash}; -use namada_core::types::storage::{BlockHeight, DbKeySeg, Epoch, Key}; +use namada_core::address::Address; +use namada_core::ethereum_events::{EthereumEvent, Uint}; +use namada_core::hash::Hash; +use namada_core::keccak::{keccak_hash, KeccakHash}; +use namada_core::storage::{BlockHeight, DbKeySeg, Epoch, Key}; use namada_macros::StorageKeys; use namada_vote_ext::validator_set_update::VotingPowersMap; use crate::storage::proof::{BridgePoolRootProof, EthereumProof}; +use crate::ADDRESS; /// Storage sub-key space reserved to keeping track of the /// voting power assigned to Ethereum events. 
@@ -284,7 +284,7 @@ mod test { fn test_ethereum_event_keys_all_keys() { let (event, hash) = helpers::arbitrary_event_with_hash(); let keys: Keys = (&event).into(); - let prefix = vec![ + let prefix = [ DbKeySeg::AddressSeg(ADDRESS), DbKeySeg::StringSeg(ETH_MSGS_PREFIX_KEY_SEGMENT.to_owned()), DbKeySeg::StringSeg(hash), @@ -339,7 +339,7 @@ mod test { fn test_ethereum_event_keys_from_ethereum_event() { let (event, hash) = helpers::arbitrary_event_with_hash(); let keys: Keys = (&event).into(); - let expected = vec![ + let expected = [ DbKeySeg::AddressSeg(ADDRESS), DbKeySeg::StringSeg(ETH_MSGS_PREFIX_KEY_SEGMENT.to_owned()), DbKeySeg::StringSeg(hash), @@ -351,7 +351,7 @@ mod test { fn test_ethereum_event_keys_from_hash() { let (event, hash) = helpers::arbitrary_event_with_hash(); let keys: Keys = (&event.hash().unwrap()).into(); - let expected = vec![ + let expected = [ DbKeySeg::AddressSeg(ADDRESS), DbKeySeg::StringSeg(ETH_MSGS_PREFIX_KEY_SEGMENT.to_owned()), DbKeySeg::StringSeg(hash), diff --git a/crates/ethereum_bridge/src/storage/vp/bridge_pool.rs b/crates/ethereum_bridge/src/storage/vp/bridge_pool.rs index ec52cf37db..f09fd9ebe3 100644 --- a/crates/ethereum_bridge/src/storage/vp/bridge_pool.rs +++ b/crates/ethereum_bridge/src/storage/vp/bridge_pool.rs @@ -1,6 +1,5 @@ -use namada_core::types::ethereum_events::Uint; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; -use namada_storage::StorageWrite; +use namada_core::ethereum_events::Uint; +use namada_storage::{StorageRead, StorageWrite}; use namada_trans_token::storage_key::balance_key; use namada_trans_token::Amount; @@ -10,17 +9,16 @@ use crate::storage::bridge_pool::{get_nonce_key, BRIDGE_POOL_ADDRESS}; /// /// This means that the amount of escrowed gas fees is /// initialized to 0. 
-pub fn init_storage(wl_storage: &mut WlStorage) +pub fn init_storage(storage: &mut S) where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, + S: StorageRead + StorageWrite, { let escrow_key = - balance_key(&wl_storage.storage.native_token, &BRIDGE_POOL_ADDRESS); - wl_storage.write(&escrow_key, Amount::default()).expect( + balance_key(&storage.get_native_token().unwrap(), &BRIDGE_POOL_ADDRESS); + storage.write(&escrow_key, Amount::default()).expect( "Initializing the escrow balance of the Bridge pool VP shouldn't fail.", ); - wl_storage + storage .write(&get_nonce_key(), Uint::from(0)) .expect("Initializing the Bridge pool nonce shouldn't fail."); } diff --git a/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs b/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs index 644555548b..c1a8dd8e16 100644 --- a/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs +++ b/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs @@ -1,21 +1,20 @@ -use namada_core::ledger::eth_bridge::ADDRESS; -use namada_core::types::hash::StorageHasher; -use namada_state::{DBIter, WlStorage, DB}; -use namada_storage::StorageWrite; +use namada_storage::{StorageRead, StorageWrite}; use namada_trans_token::storage_key::balance_key; use namada_trans_token::Amount; +use crate::ADDRESS; + /// Initialize the storage owned by the Ethereum Bridge VP. /// /// This means that the amount of escrowed Nam is /// initialized to 0. 
-pub fn init_storage(wl_storage: &mut WlStorage) +pub fn init_storage(storage: &mut S) where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, + S: StorageRead + StorageWrite, { - let escrow_key = balance_key(&wl_storage.storage.native_token, &ADDRESS); - wl_storage.write(&escrow_key, Amount::default()).expect( + let escrow_key = + balance_key(&storage.get_native_token().unwrap(), &ADDRESS); + storage.write(&escrow_key, Amount::default()).expect( "Initializing the escrow balance of the Ethereum Bridge VP shouldn't \ fail.", ); diff --git a/crates/ethereum_bridge/src/storage/whitelist.rs b/crates/ethereum_bridge/src/storage/whitelist.rs index 6abc873dfb..349469a0d0 100644 --- a/crates/ethereum_bridge/src/storage/whitelist.rs +++ b/crates/ethereum_bridge/src/storage/whitelist.rs @@ -5,18 +5,18 @@ use std::str::FromStr; -use namada_core::ledger::eth_bridge::ADDRESS as BRIDGE_ADDRESS; -use namada_core::types::eth_bridge_pool::erc20_token_address; -use namada_core::types::ethereum_events::EthAddress; -use namada_core::types::storage; -use namada_core::types::storage::DbKeySeg; +use namada_core::eth_bridge_pool::erc20_token_address; +use namada_core::ethereum_events::EthAddress; +use namada_core::storage; +use namada_core::storage::DbKeySeg; use namada_trans_token::storage_key::{denom_key, minted_balance_key}; use super::prefix as ethbridge_key_prefix; +use crate::ADDRESS as BRIDGE_ADDRESS; mod segments { //! Storage key segments under the token whitelist. - use namada_core::types::address::Address; + use namada_core::address::Address; use namada_macros::StorageKeys; /// The name of the main storage segment. 
@@ -118,7 +118,7 @@ pub fn is_cap_or_whitelisted_key(key: &storage::Key) -> bool { #[cfg(test)] mod tests { - use namada_core::types::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; + use namada_core::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; use super::*; diff --git a/crates/ethereum_bridge/src/storage/wrapped_erc20s.rs b/crates/ethereum_bridge/src/storage/wrapped_erc20s.rs index 4976caea8f..d4e68d97a6 100644 --- a/crates/ethereum_bridge/src/storage/wrapped_erc20s.rs +++ b/crates/ethereum_bridge/src/storage/wrapped_erc20s.rs @@ -1,12 +1,12 @@ //! Functionality for accessing the multitoken subspace use eyre::eyre; -use namada_core::types::address::{Address, InternalAddress}; -pub use namada_core::types::eth_bridge_pool::{ +use namada_core::address::{Address, InternalAddress}; +pub use namada_core::eth_bridge_pool::{ erc20_nut_address as nut, erc20_token_address as token, }; -use namada_core::types::ethereum_events::EthAddress; -use namada_core::types::storage::{self, DbKeySeg}; +use namada_core::ethereum_events::EthAddress; +use namada_core::storage::{self, DbKeySeg}; use namada_trans_token::storage_key::{ balance_key, minted_balance_key, MINTED_STORAGE_KEY, }; @@ -106,13 +106,11 @@ impl TryFrom<(&Address, &storage::Key)> for Key { #[cfg(test)] mod test { - use std::result::Result; use std::str::FromStr; use assert_matches::assert_matches; - use namada_core::types::address::{nam, Address}; - use namada_core::types::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; - use namada_core::types::storage::DbKeySeg; + use namada_core::address::testing::nam; + use namada_core::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; use super::*; use crate::token::storage_key::BALANCE_STORAGE_KEY; diff --git a/crates/ethereum_bridge/src/test_utils.rs b/crates/ethereum_bridge/src/test_utils.rs index bc108be1d0..28d42921a6 100644 --- a/crates/ethereum_bridge/src/test_utils.rs +++ b/crates/ethereum_bridge/src/test_utils.rs @@ -4,13 +4,14 @@ use std::collections::HashMap; use 
std::num::NonZeroU64; use namada_account::protocol_pk_key; -use namada_core::types::address::{self, wnam, Address}; -use namada_core::types::dec::Dec; -use namada_core::types::ethereum_events::EthAddress; -use namada_core::types::keccak::KeccakHash; -use namada_core::types::key::{self, RefTo}; -use namada_core::types::storage::{BlockHeight, Key}; -use namada_core::types::token; +use namada_core::address::testing::wnam; +use namada_core::address::{self, Address}; +use namada_core::dec::Dec; +use namada_core::ethereum_events::EthAddress; +use namada_core::keccak::KeccakHash; +use namada_core::key::{self, RefTo}; +use namada_core::storage::{BlockHeight, Key}; +use namada_core::token; use namada_proof_of_stake::parameters::OwnedPosParams; use namada_proof_of_stake::pos_queries::PosQueries; use namada_proof_of_stake::types::GenesisValidator; @@ -18,7 +19,7 @@ use namada_proof_of_stake::{ become_validator, bond_tokens, compute_and_store_total_consensus_stake, staking_token_address, BecomeValidator, }; -use namada_state::testing::TestWlStorage; +use namada_state::testing::TestState; use namada_storage::{StorageRead, StorageWrite}; use namada_trans_token::credit_tokens; @@ -62,26 +63,26 @@ impl TestValidatorKeys { } } -/// Set up a [`TestWlStorage`] initialized at genesis with a single +/// Set up a [`TestState`] initialized at genesis with a single /// validator. /// /// The validator's address is [`address::testing::established_address_1`]. #[inline] pub fn setup_default_storage() --> (TestWlStorage, HashMap) { - let mut wl_storage = TestWlStorage::default(); - let all_keys = init_default_storage(&mut wl_storage); - (wl_storage, all_keys) +-> (TestState, HashMap) { + let mut state = TestState::default(); + let all_keys = init_default_storage(&mut state); + (state, all_keys) } -/// Set up a [`TestWlStorage`] initialized at genesis with +/// Set up a [`TestState`] initialized at genesis with /// [`default_validator`]. 
#[inline] pub fn init_default_storage( - wl_storage: &mut TestWlStorage, + state: &mut TestState, ) -> HashMap { init_storage_with_validators( - wl_storage, + state, HashMap::from_iter([default_validator()]), ) } @@ -97,10 +98,10 @@ pub fn default_validator() -> (Address, token::Amount) { (addr, voting_power) } -/// Writes a dummy [`EthereumBridgeParams`] to the given [`TestWlStorage`], and +/// Writes a dummy [`EthereumBridgeParams`] to the given [`TestState`], and /// returns it. pub fn bootstrap_ethereum_bridge( - wl_storage: &mut TestWlStorage, + state: &mut TestState, ) -> EthereumBridgeParams { let config = EthereumBridgeParams { // start with empty erc20 whitelist @@ -120,7 +121,7 @@ pub fn bootstrap_ethereum_bridge( }, }, }; - config.init_storage(wl_storage); + config.init_storage(state); config } @@ -133,7 +134,7 @@ pub struct WhitelistMeta { } /// Whitelist the given Ethereum tokens. -pub fn whitelist_tokens(wl_storage: &mut TestWlStorage, token_list: L) +pub fn whitelist_tokens(state: &mut TestState, token_list: L) where L: Into>, { @@ -143,52 +144,50 @@ where suffix: whitelist::KeyType::Cap, } .into(); - wl_storage.write(&cap_key, cap).expect("Test failed"); + state.write(&cap_key, cap).expect("Test failed"); let whitelisted_key = whitelist::Key { asset, suffix: whitelist::KeyType::Whitelisted, } .into(); - wl_storage - .write(&whitelisted_key, true) - .expect("Test failed"); + state.write(&whitelisted_key, true).expect("Test failed"); let denom_key = whitelist::Key { asset, suffix: whitelist::KeyType::Denomination, } .into(); - wl_storage.write(&denom_key, denom).expect("Test failed"); + state.write(&denom_key, denom).expect("Test failed"); } } /// Returns the number of keys in `storage` which have values present. 
-pub fn stored_keys_count(wl_storage: &TestWlStorage) -> usize { +pub fn stored_keys_count(state: &TestState) -> usize { let root = Key { segments: vec![] }; - wl_storage.iter_prefix(&root).expect("Test failed").count() + state.iter_prefix(&root).expect("Test failed").count() } -/// Set up a [`TestWlStorage`] initialized at genesis with the given +/// Set up a [`TestState`] initialized at genesis with the given /// validators. pub fn setup_storage_with_validators( consensus_validators: HashMap, -) -> (TestWlStorage, HashMap) { - let mut wl_storage = TestWlStorage::default(); +) -> (TestState, HashMap) { + let mut state = TestState::default(); let all_keys = - init_storage_with_validators(&mut wl_storage, consensus_validators); - (wl_storage, all_keys) + init_storage_with_validators(&mut state, consensus_validators); + (state, all_keys) } -/// Set up a [`TestWlStorage`] initialized at genesis with the given +/// Set up a [`TestState`] initialized at genesis with the given /// validators. 
pub fn init_storage_with_validators( - wl_storage: &mut TestWlStorage, + state: &mut TestState, consensus_validators: HashMap, ) -> HashMap { // set last height to a reasonable value; // it should allow vote extensions to be cast - wl_storage.storage.block.height = 1.into(); + state.in_mem_mut().block.height = 1.into(); let mut all_keys = HashMap::new(); let validators: Vec<_> = consensus_validators @@ -215,28 +214,25 @@ pub fn init_storage_with_validators( .collect(); namada_proof_of_stake::test_utils::test_init_genesis( - wl_storage, + state, OwnedPosParams::default(), validators.into_iter(), 0.into(), ) .expect("Test failed"); - bootstrap_ethereum_bridge(wl_storage); + bootstrap_ethereum_bridge(state); for (validator, keys) in all_keys.iter() { let protocol_key = keys.protocol.ref_to(); - wl_storage + state .write(&protocol_pk_key(validator), protocol_key) .expect("Test failed"); } // Initialize pred_epochs to the current height - wl_storage - .storage - .block - .pred_epochs - .new_epoch(wl_storage.storage.block.height); - wl_storage.commit_block().expect("Test failed"); - wl_storage.storage.block.height += 1; + let height = state.in_mem().block.height; + state.in_mem_mut().block.pred_epochs.new_epoch(height); + state.commit_block().expect("Test failed"); + state.in_mem_mut().block.height += 1; all_keys } @@ -246,28 +242,28 @@ pub fn init_storage_with_validators( /// /// N.B. assumes the bridge pool is empty. 
pub fn commit_bridge_pool_root_at_height( - wl_storage: &mut TestWlStorage, + state: &mut TestState, root: &KeccakHash, height: BlockHeight, ) { - wl_storage.storage.block.height = height; - wl_storage.write(&get_key_from_hash(root), height).unwrap(); - wl_storage.commit_block().unwrap(); - wl_storage.delete(&get_key_from_hash(root)).unwrap(); + state.in_mem_mut().block.height = height; + state.write(&get_key_from_hash(root), height).unwrap(); + state.commit_block().unwrap(); + state.delete(&get_key_from_hash(root)).unwrap(); } /// Append validators to storage at the current epoch /// offset by pipeline length. pub fn append_validators_to_storage( - wl_storage: &mut TestWlStorage, + state: &mut TestState, consensus_validators: HashMap, ) -> HashMap { - let current_epoch = wl_storage.storage.get_current_epoch().0; + let current_epoch = state.in_mem().get_current_epoch().0; let mut all_keys = HashMap::new(); - let params = wl_storage.pos_queries().get_pos_params(); + let params = state.pos_queries().get_pos_params(); - let staking_token = staking_token_address(wl_storage); + let staking_token = staking_token_address(state); for (validator, stake) in consensus_validators { let keys = TestValidatorKeys::generate(); @@ -278,7 +274,7 @@ pub fn append_validators_to_storage( let eth_hot_key = &keys.eth_bridge.ref_to(); become_validator( - wl_storage, + state, BecomeValidator { params: ¶ms, address: &validator, @@ -294,27 +290,27 @@ pub fn append_validators_to_storage( }, ) .expect("Test failed"); - credit_tokens(wl_storage, &staking_token, &validator, stake) + credit_tokens(state, &staking_token, &validator, stake) .expect("Test failed"); - bond_tokens(wl_storage, None, &validator, stake, current_epoch, None) + bond_tokens(state, None, &validator, stake, current_epoch, None) .expect("Test failed"); all_keys.insert(validator, keys); } compute_and_store_total_consensus_stake( - wl_storage, + state, current_epoch + params.pipeline_len, ) .expect("Test failed"); for 
(validator, keys) in all_keys.iter() { let protocol_key = keys.protocol.ref_to(); - wl_storage + state .write(&protocol_pk_key(validator), protocol_key) .expect("Test failed"); } - wl_storage.commit_block().expect("Test failed"); + state.commit_block().expect("Test failed"); all_keys } diff --git a/crates/governance/Cargo.toml b/crates/governance/Cargo.toml index 8283fd4eb6..82cbb75c1b 100644 --- a/crates/governance/Cargo.toml +++ b/crates/governance/Cargo.toml @@ -19,7 +19,7 @@ testing = ["proptest"] namada_core = { path = "../core" } namada_macros = {path = "../macros"} namada_parameters = {path = "../parameters"} -namada_state = {path = "../state"} +namada_storage = {path = "../storage"} namada_trans_token = {path = "../trans_token"} borsh.workspace = true diff --git a/crates/governance/src/cli/offline.rs b/crates/governance/src/cli/offline.rs index b89d9afe10..f55a3e0104 100644 --- a/crates/governance/src/cli/offline.rs +++ b/crates/governance/src/cli/offline.rs @@ -2,13 +2,13 @@ use std::collections::{BTreeMap, BTreeSet}; use std::fs::{File, ReadDir}; use std::path::PathBuf; +use namada_core::account::AccountPublicKeysMap; +use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; -use namada_core::types::account::AccountPublicKeysMap; -use namada_core::types::address::Address; -use namada_core::types::hash::Hash; -use namada_core::types::key::{common, RefTo, SigScheme}; -use namada_core::types::sign::SignatureIndex; -use namada_core::types::storage::Epoch; +use namada_core::hash::Hash; +use namada_core::key::{common, RefTo, SigScheme}; +use namada_core::sign::SignatureIndex; +use namada_core::storage::Epoch; use serde::{Deserialize, Serialize}; use super::validation::{is_valid_tally_epoch, ProposalValidation}; diff --git a/crates/governance/src/cli/onchain.rs b/crates/governance/src/cli/onchain.rs index 3e4e82b1d3..f4c405af13 100644 --- a/crates/governance/src/cli/onchain.rs +++ 
b/crates/governance/src/cli/onchain.rs @@ -2,9 +2,9 @@ use std::collections::BTreeMap; use std::fmt::Display; use borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::address::Address; -use namada_core::types::storage::Epoch; -use namada_core::types::token; +use namada_core::address::Address; +use namada_core::storage::Epoch; +use namada_core::token; use serde::{Deserialize, Serialize}; use super::validation::{ diff --git a/crates/governance/src/cli/validation.rs b/crates/governance/src/cli/validation.rs index db3222614d..07efc82e93 100644 --- a/crates/governance/src/cli/validation.rs +++ b/crates/governance/src/cli/validation.rs @@ -1,8 +1,8 @@ use std::collections::BTreeMap; -use namada_core::types::address::Address; -use namada_core::types::storage::Epoch; -use namada_core::types::token; +use namada_core::address::Address; +use namada_core::storage::Epoch; +use namada_core::token; use thiserror::Error; use super::onchain::{PgfFunding, StewardsUpdate}; diff --git a/crates/governance/src/lib.rs b/crates/governance/src/lib.rs index 54d23b623b..49d6695a99 100644 --- a/crates/governance/src/lib.rs +++ b/crates/governance/src/lib.rs @@ -1,6 +1,6 @@ //! 
Governance library code -use namada_core::types::address::{self, Address}; +use namada_core::address::{self, Address}; /// governance CLI structures pub mod cli; diff --git a/crates/governance/src/parameters.rs b/crates/governance/src/parameters.rs index a93eefad43..5318ec2880 100644 --- a/crates/governance/src/parameters.rs +++ b/crates/governance/src/parameters.rs @@ -1,6 +1,6 @@ use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::token; -use namada_state::{StorageRead, StorageResult, StorageWrite}; +use namada_core::token; +use namada_storage::{Result, StorageRead, StorageWrite}; use super::storage::keys as goverance_storage; @@ -46,7 +46,7 @@ impl Default for GovernanceParameters { impl GovernanceParameters { /// Initialize governance parameters into storage - pub fn init_storage(&self, storage: &mut S) -> StorageResult<()> + pub fn init_storage(&self, storage: &mut S) -> Result<()> where S: StorageRead + StorageWrite, { diff --git a/crates/governance/src/pgf/cli/steward.rs b/crates/governance/src/pgf/cli/steward.rs index 6cfbf61b26..bac7482603 100644 --- a/crates/governance/src/pgf/cli/steward.rs +++ b/crates/governance/src/pgf/cli/steward.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; +use namada_core::address::Address; +use namada_core::dec::Dec; use serde::{Deserialize, Serialize}; use crate::pgf::REWARD_DISTRIBUTION_LIMIT; diff --git a/crates/governance/src/pgf/inflation.rs b/crates/governance/src/pgf/inflation.rs index 30391a622a..a77373e4f9 100644 --- a/crates/governance/src/pgf/inflation.rs +++ b/crates/governance/src/pgf/inflation.rs @@ -1,11 +1,9 @@ //! PGF lib code. 
-use namada_core::types::address::Address; -use namada_core::types::token; +use namada_core::address::Address; +use namada_core::token; use namada_parameters::storage as params_storage; -use namada_state::{ - DBIter, StorageHasher, StorageRead, StorageResult, WlStorage, DB, -}; +use namada_storage::{Result, StorageRead, StorageWrite}; use namada_trans_token::credit_tokens; use namada_trans_token::storage_key::minted_balance_key; @@ -13,19 +11,13 @@ use crate::pgf::storage::{get_parameters, get_payments, get_stewards}; use crate::storage::proposal::{PGFIbcTarget, PGFTarget}; /// Apply the PGF inflation. -pub fn apply_inflation( - storage: &mut WlStorage, +pub fn apply_inflation( + storage: &mut S, transfer_over_ibc: F, -) -> StorageResult<()> +) -> Result<()> where - D: DB + for<'iter> DBIter<'iter> + Sync + 'static, - H: StorageHasher + Sync + 'static, - F: Fn( - &mut WlStorage, - &Address, - &Address, - &PGFIbcTarget, - ) -> StorageResult<()>, + S: StorageWrite + StorageRead, + F: Fn(&mut S, &Address, &Address, &PGFIbcTarget) -> Result<()>, { let pgf_parameters = get_parameters(storage)?; let staking_token = storage.get_native_token()?; diff --git a/crates/governance/src/pgf/mod.rs b/crates/governance/src/pgf/mod.rs index 77302b6ebd..22592625cf 100644 --- a/crates/governance/src/pgf/mod.rs +++ b/crates/governance/src/pgf/mod.rs @@ -1,6 +1,6 @@ //! 
Pgf library code -use namada_core::types::address::{Address, InternalAddress}; +use namada_core::address::{Address, InternalAddress}; /// Pgf CLI pub mod cli; diff --git a/crates/governance/src/pgf/parameters.rs b/crates/governance/src/pgf/parameters.rs index 416cbcb931..69e6fea9d3 100644 --- a/crates/governance/src/pgf/parameters.rs +++ b/crates/governance/src/pgf/parameters.rs @@ -1,9 +1,9 @@ use std::collections::BTreeSet; +use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; -use namada_state::{StorageRead, StorageResult, StorageWrite}; +use namada_core::dec::Dec; +use namada_storage::{Result, StorageRead, StorageWrite}; use serde::{Deserialize, Serialize}; use super::storage::keys as pgf_storage; @@ -44,7 +44,7 @@ impl Default for PgfParameters { impl PgfParameters { /// Initialize governance parameters into storage - pub fn init_storage(&self, storage: &mut S) -> StorageResult<()> + pub fn init_storage(&self, storage: &mut S) -> Result<()> where S: StorageRead + StorageWrite, { diff --git a/crates/governance/src/pgf/storage/keys.rs b/crates/governance/src/pgf/storage/keys.rs index 5d581d27a7..dcdee44d83 100644 --- a/crates/governance/src/pgf/storage/keys.rs +++ b/crates/governance/src/pgf/storage/keys.rs @@ -1,7 +1,7 @@ -use namada_core::types::address::Address; -use namada_core::types::storage::{DbKeySeg, Key, KeySeg}; +use namada_core::address::Address; +use namada_core::storage::{DbKeySeg, Key, KeySeg}; use namada_macros::StorageKeys; -use namada_state::collections::{lazy_map, LazyCollection, LazyMap}; +use namada_storage::collections::{lazy_map, LazyCollection, LazyMap}; use crate::pgf::storage::steward::StewardDetail; use crate::pgf::ADDRESS; diff --git a/crates/governance/src/pgf/storage/mod.rs b/crates/governance/src/pgf/storage/mod.rs index e064a00f62..1b523ecf9a 100644 --- a/crates/governance/src/pgf/storage/mod.rs +++ 
b/crates/governance/src/pgf/storage/mod.rs @@ -7,9 +7,9 @@ pub mod steward; use std::collections::HashMap; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; -use namada_state::{StorageRead, StorageResult, StorageWrite}; +use namada_core::address::Address; +use namada_core::dec::Dec; +use namada_storage::{Result, StorageRead, StorageWrite}; use crate::pgf::parameters::PgfParameters; use crate::pgf::storage::keys as pgf_keys; @@ -17,7 +17,7 @@ use crate::pgf::storage::steward::StewardDetail; use crate::storage::proposal::StoragePgfFunding; /// Query the current pgf steward set -pub fn get_stewards(storage: &S) -> StorageResult> +pub fn get_stewards(storage: &S) -> Result> where S: StorageRead, { @@ -36,7 +36,7 @@ where pub fn get_steward( storage: &S, address: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -44,7 +44,7 @@ where } /// Check if an address is a steward -pub fn is_steward(storage: &S, address: &Address) -> StorageResult +pub fn is_steward(storage: &S, address: &Address) -> Result where S: StorageRead, { @@ -52,10 +52,7 @@ where } /// Remove a steward -pub fn remove_steward( - storage: &mut S, - address: &Address, -) -> StorageResult<()> +pub fn remove_steward(storage: &mut S, address: &Address) -> Result<()> where S: StorageRead + StorageWrite, { @@ -65,7 +62,7 @@ where } /// Query the current pgf continuous payments -pub fn get_payments(storage: &S) -> StorageResult> +pub fn get_payments(storage: &S) -> Result> where S: StorageRead, { @@ -81,7 +78,7 @@ where } /// Query the pgf parameters -pub fn get_parameters(storage: &S) -> StorageResult +pub fn get_parameters(storage: &S) -> Result where S: StorageRead, { @@ -108,7 +105,7 @@ pub fn update_commission( storage: &mut S, address: Address, reward_distribution: HashMap, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { diff --git a/crates/governance/src/pgf/storage/steward.rs b/crates/governance/src/pgf/storage/steward.rs 
index ce6855a130..973c33ed78 100644 --- a/crates/governance/src/pgf/storage/steward.rs +++ b/crates/governance/src/pgf/storage/steward.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; use borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; +use namada_core::address::Address; +use namada_core::dec::Dec; use crate::pgf::REWARD_DISTRIBUTION_LIMIT; diff --git a/crates/governance/src/storage/keys.rs b/crates/governance/src/storage/keys.rs index 5eed69cbba..ded546af78 100644 --- a/crates/governance/src/storage/keys.rs +++ b/crates/governance/src/storage/keys.rs @@ -1,5 +1,5 @@ -use namada_core::types::address::Address; -use namada_core::types::storage::{DbKeySeg, Key, KeySeg}; +use namada_core::address::Address; +use namada_core::storage::{DbKeySeg, Key, KeySeg}; use namada_macros::StorageKeys; use crate::ADDRESS; diff --git a/crates/governance/src/storage/mod.rs b/crates/governance/src/storage/mod.rs index 4feaeba578..a6cc8bc787 100644 --- a/crates/governance/src/storage/mod.rs +++ b/crates/governance/src/storage/mod.rs @@ -9,12 +9,10 @@ pub mod vote; use std::collections::BTreeMap; +use namada_core::address::Address; use namada_core::borsh::BorshDeserialize; -use namada_core::types::address::Address; -use namada_core::types::storage::Epoch; -use namada_state::{ - iter_prefix, StorageError, StorageRead, StorageResult, StorageWrite, -}; +use namada_core::storage::Epoch; +use namada_storage::{iter_prefix, Error, Result, StorageRead, StorageWrite}; use namada_trans_token as token; use crate::parameters::GovernanceParameters; @@ -32,7 +30,7 @@ pub fn init_proposal( data: InitProposalData, content: Vec, code: Option>, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -57,7 +55,7 @@ where governance_keys::get_proposal_code_key(proposal_id); let proposal_code = code .clone() - .ok_or(StorageError::new_const("Missing proposal code"))?; + .ok_or(Error::new_const("Missing 
proposal code"))?; storage.write_bytes(&proposal_code_key, proposal_code)? } _ => storage.write(&proposal_type_key, data.r#type.clone())?, @@ -78,7 +76,7 @@ where let proposal_code_key = governance_keys::get_proposal_code_key(proposal_id); let proposal_code = - code.ok_or(StorageError::new_const("Missing proposal code"))?; + code.ok_or(Error::new_const("Missing proposal code"))?; storage.write_bytes(&proposal_code_key, proposal_code)?; } @@ -109,10 +107,7 @@ where } /// A proposal vote transaction. -pub fn vote_proposal( - storage: &mut S, - data: VoteProposalData, -) -> StorageResult<()> +pub fn vote_proposal(storage: &mut S, data: VoteProposalData) -> Result<()> where S: StorageRead + StorageWrite, { @@ -132,7 +127,7 @@ pub fn write_proposal_result( storage: &mut S, proposal_id: u64, proposal_result: ProposalResult, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -145,7 +140,7 @@ where pub fn get_proposal_by_id( storage: &S, id: u64, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -178,10 +173,7 @@ where } /// Query all the votes for a proposal_id -pub fn get_proposal_votes( - storage: &S, - proposal_id: u64, -) -> StorageResult> +pub fn get_proposal_votes(storage: &S, proposal_id: u64) -> Result> where S: StorageRead, { @@ -216,10 +208,7 @@ where } /// Check if an accepted proposal is being executed -pub fn is_proposal_accepted( - storage: &S, - tx_data: &[u8], -) -> StorageResult +pub fn is_proposal_accepted(storage: &S, tx_data: &[u8]) -> Result where S: StorageRead, { @@ -237,7 +226,7 @@ where pub fn get_proposal_code( storage: &S, proposal_id: u64, -) -> StorageResult>> +) -> Result>> where S: StorageRead, { @@ -249,7 +238,7 @@ where pub fn get_proposal_author( storage: &S, proposal_id: u64, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -258,7 +247,7 @@ where } /// Get governance parameters -pub fn get_parameters(storage: &S) -> StorageResult +pub fn get_parameters(storage: &S) -> Result where 
S: StorageRead, { @@ -295,7 +284,7 @@ where } /// Get governance "max_proposal_period" parameter -pub fn get_max_proposal_period(storage: &S) -> StorageResult +pub fn get_max_proposal_period(storage: &S) -> Result where S: StorageRead, { @@ -309,7 +298,7 @@ where pub fn get_proposal_result( storage: &S, proposal_id: u64, -) -> StorageResult> +) -> Result> where S: StorageRead, { diff --git a/crates/governance/src/storage/proposal.rs b/crates/governance/src/storage/proposal.rs index a4cf645066..c5f527f64b 100644 --- a/crates/governance/src/storage/proposal.rs +++ b/crates/governance/src/storage/proposal.rs @@ -3,10 +3,10 @@ use std::fmt::Display; use borsh::{BorshDeserialize, BorshSerialize}; use itertools::Itertools; +use namada_core::address::Address; +use namada_core::hash::Hash; use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; -use namada_core::types::address::Address; -use namada_core::types::hash::Hash; -use namada_core::types::storage::Epoch; +use namada_core::storage::Epoch; use namada_trans_token::Amount; use serde::{Deserialize, Serialize}; use thiserror::Error; @@ -617,10 +617,10 @@ impl Display for StorageProposal { #[cfg(any(test, feature = "testing"))] /// Testing helpers and and strategies for governance proposals pub mod testing { - use namada_core::types::address::testing::arb_non_internal_address; - use namada_core::types::hash::testing::arb_hash; - use namada_core::types::storage::testing::arb_epoch; - use namada_core::types::token::testing::arb_amount; + use namada_core::address::testing::arb_non_internal_address; + use namada_core::hash::testing::arb_hash; + use namada_core::storage::testing::arb_epoch; + use namada_core::token::testing::arb_amount; use proptest::prelude::*; use proptest::{collection, option, prop_compose}; @@ -644,18 +644,57 @@ pub mod testing { } prop_compose! 
{ - /// Generate an arbitrary PGF target - pub fn arb_pgf_target()( + /// Generate an arbitrary PGF internal target + pub fn arb_pgf_internal_target()( target in arb_non_internal_address(), amount in arb_amount(), - ) -> PGFTarget { - PGFTarget::Internal(PGFInternalTarget { + ) -> PGFInternalTarget { + PGFInternalTarget { target, amount, - }) + } + } + } + + prop_compose! { + /// Generate an arbitrary port ID + pub fn arb_ibc_port_id()(id in "[a-zA-Z0-9_+.\\-\\[\\]#<>]{2,128}") -> PortId { + PortId::new(id).expect("generated invalid port ID") + } + } + + prop_compose! { + /// Generate an arbitrary channel ID + pub fn arb_ibc_channel_id()(id: u64) -> ChannelId { + ChannelId::new(id) + } + } + + prop_compose! { + /// Generate an arbitrary PGF IBC target + pub fn arb_pgf_ibc_target()( + target in "[a-zA-Z0-9_]*", + amount in arb_amount(), + port_id in arb_ibc_port_id(), + channel_id in arb_ibc_channel_id(), + ) -> PGFIbcTarget { + PGFIbcTarget { + target, + amount, + port_id, + channel_id, + } } } + /// Generate an arbitrary PGF target + pub fn arb_pgf_target() -> impl Strategy { + prop_oneof![ + arb_pgf_internal_target().prop_map(PGFTarget::Internal), + arb_pgf_ibc_target().prop_map(PGFTarget::Ibc), + ] + } + /// Generate an arbitrary PGF action pub fn arb_pgf_action() -> impl Strategy { prop_oneof![ diff --git a/crates/governance/src/utils.rs b/crates/governance/src/utils.rs index cf380fd82f..4619c90473 100644 --- a/crates/governance/src/utils.rs +++ b/crates/governance/src/utils.rs @@ -1,11 +1,11 @@ use std::collections::HashMap; use std::fmt::Display; +use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; -use namada_core::types::storage::Epoch; -use namada_core::types::token; +use namada_core::dec::Dec; +use namada_core::storage::Epoch; +use namada_core::token; use super::cli::offline::OfflineVote; use super::storage::proposal::ProposalType; @@ 
-452,7 +452,7 @@ pub fn is_valid_validator_voting_period( mod test { use std::ops::{Add, Sub}; - use namada_core::types::address; + use namada_core::address; use super::*; diff --git a/crates/ibc/Cargo.toml b/crates/ibc/Cargo.toml index cb54e54832..da368475fe 100644 --- a/crates/ibc/Cargo.toml +++ b/crates/ibc/Cargo.toml @@ -18,11 +18,12 @@ testing = ["namada_core/testing", "ibc-testkit", "proptest"] [dependencies] namada_core = { path = "../core" } +namada_gas = { path = "../gas" } namada_governance = { path = "../governance" } namada_parameters = { path = "../parameters" } namada_state = { path = "../state" } namada_storage = { path = "../storage" } -namada_trans_token = { path = "../trans_token" } +namada_token = { path = "../token" } borsh.workspace = true ibc.workspace = true diff --git a/crates/ibc/src/actions.rs b/crates/ibc/src/actions.rs index eed8f284ae..699aba0c3c 100644 --- a/crates/ibc/src/actions.rs +++ b/crates/ibc/src/actions.rs @@ -3,78 +3,199 @@ use std::cell::RefCell; use std::rc::Rc; +use namada_core::address::{Address, InternalAddress}; use namada_core::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; use namada_core::ibc::apps::transfer::types::packet::PacketData; use namada_core::ibc::apps::transfer::types::PrefixedCoin; use namada_core::ibc::core::channel::types::timeout::TimeoutHeight; use namada_core::ibc::primitives::Msg; +use namada_core::ibc::IbcEvent; use namada_core::tendermint::Time as TmTime; -use namada_core::types::address::{Address, InternalAddress}; -use namada_core::types::hash::Hash; -use namada_core::types::ibc::IbcEvent; -use namada_core::types::storage::Epochs; -use namada_core::types::time::DateTimeUtc; -use namada_core::types::token::DenominatedAmount; +use namada_core::time::DateTimeUtc; +use namada_core::token::DenominatedAmount; use namada_governance::storage::proposal::PGFIbcTarget; use namada_parameters::read_epoch_duration_parameter; -use namada_state::wl_storage::{PrefixIter, WriteLogAndStorage}; -use 
namada_state::write_log::{self, WriteLog}; use namada_state::{ - self as storage, iter_prefix_post, DBIter, ResultExt, State, StorageError, - StorageHasher, StorageResult, StorageWrite, WlStorage, DB, + DBIter, Epochs, ResultExt, State, StateRead, StorageError, StorageHasher, + StorageRead, StorageResult, StorageWrite, TxHostEnvState, WlState, DB, }; -use namada_storage::StorageRead; -use namada_trans_token as token; +use namada_token as token; use crate::{IbcActions, IbcCommonContext, IbcStorageContext}; /// IBC protocol context #[derive(Debug)] -pub struct IbcProtocolContext<'a, D, H> +pub struct IbcProtocolContext<'a, S> where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, + S: State, { - wl_storage: &'a mut WlStorage, + state: &'a mut S, } -impl WriteLogAndStorage for IbcProtocolContext<'_, D, H> +impl StorageRead for IbcProtocolContext<'_, S> where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, + S: State, { - type D = D; - type H = H; + type PrefixIter<'iter> = ::PrefixIter<'iter> where Self: 'iter; - fn write_log(&self) -> &WriteLog { - self.wl_storage.write_log() + fn read_bytes( + &self, + key: &namada_storage::Key, + ) -> StorageResult>> { + self.state.read_bytes(key) + } + + fn has_key(&self, key: &namada_storage::Key) -> StorageResult { + self.state.has_key(key) + } + + fn iter_prefix<'iter>( + &'iter self, + prefix: &namada_storage::Key, + ) -> StorageResult> { + self.state.iter_prefix(prefix) + } + + fn iter_next<'iter>( + &'iter self, + iter: &mut Self::PrefixIter<'iter>, + ) -> StorageResult)>> { + self.state.iter_next(iter) + } + + fn get_chain_id(&self) -> StorageResult { + self.state.get_chain_id() } - fn write_log_mut(&mut self) -> &mut WriteLog { - self.wl_storage.write_log_mut() + fn get_block_height(&self) -> StorageResult { + self.state.get_block_height() + } + + fn get_block_header( + &self, + height: namada_storage::BlockHeight, + ) -> StorageResult> { + StorageRead::get_block_header(self.state, height) } - fn 
storage(&self) -> &State { - self.wl_storage.storage() + fn get_block_hash(&self) -> StorageResult { + self.state.get_block_hash() } - fn split_borrow(&mut self) -> (&mut WriteLog, &State) { - self.wl_storage.split_borrow() + fn get_block_epoch(&self) -> StorageResult { + self.state.get_block_epoch() } - fn write_tx_hash(&mut self, hash: Hash) -> write_log::Result<()> { - self.wl_storage.write_tx_hash(hash) + fn get_pred_epochs(&self) -> StorageResult { + self.state.get_pred_epochs() + } + + fn get_tx_index(&self) -> StorageResult { + self.state.get_tx_index() + } + + fn get_native_token(&self) -> StorageResult
{ + self.state.get_native_token() } } -namada_state::impl_storage_traits!(IbcProtocolContext<'_, D, H>); -impl IbcStorageContext for IbcProtocolContext<'_, D, H> +impl StorageWrite for IbcProtocolContext<'_, S> where - D: DB + for<'iter> DBIter<'iter> + 'static, - H: StorageHasher + 'static, + S: State, +{ + fn write_bytes( + &mut self, + key: &namada_storage::Key, + val: impl AsRef<[u8]>, + ) -> StorageResult<()> { + self.state.write_bytes(key, val) + } + + fn delete(&mut self, key: &namada_storage::Key) -> StorageResult<()> { + self.state.delete(key) + } +} + +impl IbcStorageContext for TxHostEnvState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<(), StorageError> { + self.write_log_mut().emit_ibc_event(event); + Ok(()) + } + + fn get_ibc_events( + &self, + event_type: impl AsRef, + ) -> Result, StorageError> { + Ok(self + .write_log() + .get_ibc_events() + .iter() + .filter(|event| event.event_type == event_type.as_ref()) + .cloned() + .collect()) + } + + fn transfer_token( + &mut self, + src: &Address, + dest: &Address, + token: &Address, + amount: DenominatedAmount, + ) -> Result<(), StorageError> { + token::transfer(self, token, src, dest, amount.amount()) + } + + fn handle_masp_tx( + &mut self, + shielded: &masp_primitives::transaction::Transaction, + pin_key: Option<&str>, + ) -> Result<(), StorageError> { + namada_token::utils::handle_masp_tx(self, shielded, pin_key)?; + namada_token::utils::update_note_commitment_tree(self, shielded) + } + + fn mint_token( + &mut self, + target: &Address, + token: &Address, + amount: DenominatedAmount, + ) -> Result<(), StorageError> { + token::credit_tokens(self, token, target, amount.amount())?; + let minter_key = token::storage_key::minter_key(token); + self.write(&minter_key, Address::Internal(InternalAddress::Ibc)) + } + + fn burn_token( + &mut self, + target: &Address, + token: &Address, + amount: 
DenominatedAmount, + ) -> Result<(), StorageError> { + token::burn_tokens(self, token, target, amount.amount()) + } + + fn log_string(&self, message: String) { + tracing::trace!(message); + } +} + +impl IbcCommonContext for TxHostEnvState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ +} + +impl IbcStorageContext for IbcProtocolContext<'_, S> +where + S: State, { fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<(), StorageError> { - self.wl_storage.write_log.emit_ibc_event(event); + self.state.write_log_mut().emit_ibc_event(event); Ok(()) } @@ -84,8 +205,8 @@ where event_type: impl AsRef, ) -> Result, StorageError> { Ok(self - .wl_storage - .write_log + .state + .write_log() .get_ibc_events() .iter() .filter(|event| event.event_type == event_type.as_ref()) @@ -101,7 +222,7 @@ where token: &Address, amount: DenominatedAmount, ) -> Result<(), StorageError> { - token::transfer(self, token, src, dest, amount.amount()) + token::transfer(self.state, token, src, dest, amount.amount()) } /// Handle masp tx @@ -120,9 +241,9 @@ where token: &Address, amount: DenominatedAmount, ) -> Result<(), StorageError> { - token::credit_tokens(self.wl_storage, token, target, amount.amount())?; + token::credit_tokens(self.state, token, target, amount.amount())?; let minter_key = token::storage_key::minter_key(token); - self.wl_storage + self.state .write(&minter_key, Address::Internal(InternalAddress::Ibc)) } @@ -133,7 +254,7 @@ where token: &Address, amount: DenominatedAmount, ) -> Result<(), StorageError> { - token::burn_tokens(self.wl_storage, token, target, amount.amount()) + token::burn_tokens(self.state, token, target, amount.amount()) } fn log_string(&self, message: String) { @@ -141,16 +262,11 @@ where } } -impl IbcCommonContext for IbcProtocolContext<'_, D, H> -where - D: DB + for<'iter> DBIter<'iter> + 'static, - H: StorageHasher + 'static, -{ -} +impl IbcCommonContext for IbcProtocolContext<'_, S> where S: State {} /// 
Transfer tokens over IBC pub fn transfer_over_ibc( - wl_storage: &mut WlStorage, + state: &mut WlState, token: &Address, source: &Address, target: &PGFIbcTarget, @@ -169,8 +285,8 @@ where receiver: target.target.clone().into(), memo: String::default().into(), }; - let timeout_timestamp = DateTimeUtc::now() - + read_epoch_duration_parameter(wl_storage)?.min_duration; + let timeout_timestamp = + DateTimeUtc::now() + read_epoch_duration_parameter(state)?.min_duration; let timeout_timestamp = TmTime::try_from(timeout_timestamp).into_storage_result()?; let ibc_message = MsgTransfer { @@ -184,7 +300,7 @@ where let mut data = vec![]; prost::Message::encode(&any_msg, &mut data).into_storage_result()?; - let ctx = IbcProtocolContext { wl_storage }; + let ctx = IbcProtocolContext { state }; let mut actions = IbcActions::new(Rc::new(RefCell::new(ctx))); actions.execute(&data).into_storage_result() } diff --git a/crates/ibc/src/context/common.rs b/crates/ibc/src/context/common.rs index b5b761bd86..1cb99c7fce 100644 --- a/crates/ibc/src/context/common.rs +++ b/crates/ibc/src/context/common.rs @@ -24,9 +24,9 @@ use namada_core::ibc::core::host::types::identifiers::{ }; use namada_core::ibc::primitives::proto::{Any, Protobuf}; use namada_core::ibc::primitives::Timestamp; +use namada_core::storage::{BlockHeight, Key}; use namada_core::tendermint::Time as TmTime; -use namada_core::types::storage::{BlockHeight, Key}; -use namada_core::types::time::DurationSecs; +use namada_core::time::DurationSecs; use namada_parameters::storage::get_max_expected_time_per_block_key; use prost::Message; use sha2::Digest; @@ -288,10 +288,7 @@ pub trait IbcCommonContext: IbcStorageContext { .time .try_into() .expect("The time should be converted"); - let next_validators_hash = header - .next_validators_hash - .try_into() - .expect("The hash should be converted"); + let next_validators_hash = header.next_validators_hash.into(); let consensus_state: TmConsensusState = TmConsensusStateType::new( 
commitment_root, time, diff --git a/crates/ibc/src/context/storage.rs b/crates/ibc/src/context/storage.rs index 05ab8121ef..c9d8218bd1 100644 --- a/crates/ibc/src/context/storage.rs +++ b/crates/ibc/src/context/storage.rs @@ -1,9 +1,9 @@ //! IBC storage context pub use ics23::ProofSpec; -use namada_core::types::address::Address; -use namada_core::types::ibc::IbcEvent; -use namada_core::types::token::DenominatedAmount; +use namada_core::address::Address; +use namada_core::ibc::IbcEvent; +use namada_core::token::DenominatedAmount; use namada_storage::{Error, StorageRead, StorageWrite}; /// IBC context trait to be implemented in integration that can read and write diff --git a/crates/ibc/src/context/token_transfer.rs b/crates/ibc/src/context/token_transfer.rs index 4b3e333ec4..29f4fd7bc1 100644 --- a/crates/ibc/src/context/token_transfer.rs +++ b/crates/ibc/src/context/token_transfer.rs @@ -3,6 +3,7 @@ use std::cell::RefCell; use std::rc::Rc; +use namada_core::address::{Address, InternalAddress}; use namada_core::ibc::apps::transfer::context::{ TokenTransferExecutionContext, TokenTransferValidationContext, }; @@ -11,10 +12,9 @@ use namada_core::ibc::apps::transfer::types::{PrefixedCoin, PrefixedDenom}; use namada_core::ibc::core::channel::types::error::ChannelError; use namada_core::ibc::core::handler::types::error::ContextError; use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; -use namada_core::types::address::{Address, InternalAddress}; -use namada_core::types::token; -use namada_core::types::uint::Uint; -use namada_trans_token::read_denom; +use namada_core::token; +use namada_core::uint::Uint; +use namada_token::read_denom; use super::common::IbcCommonContext; use crate::storage; diff --git a/crates/ibc/src/lib.rs b/crates/ibc/src/lib.rs index bea1b2ba4a..3f9a5fa09b 100644 --- a/crates/ibc/src/lib.rs +++ b/crates/ibc/src/lib.rs @@ -18,6 +18,7 @@ pub use context::token_transfer::TokenTransferContext; pub use 
context::transfer_mod::{ModuleWrapper, TransferModule}; use context::IbcContext; pub use context::ValidationParams; +use namada_core::address::{Address, MASP}; use namada_core::ibc::apps::transfer::handler::{ send_transfer_execute, send_transfer_validate, }; @@ -36,12 +37,7 @@ use namada_core::ibc::core::router::types::error::RouterError; use namada_core::ibc::core::router::types::module::ModuleId; use namada_core::ibc::primitives::proto::Any; pub use namada_core::ibc::*; -use namada_core::types::address::{Address, MASP}; -use namada_core::types::ibc::{ - get_shielded_transfer, is_ibc_denom, MsgShieldedTransfer, - EVENT_TYPE_DENOM_TRACE, EVENT_TYPE_PACKET, -}; -use namada_core::types::masp::PaymentAddress; +use namada_core::masp::PaymentAddress; use prost::Message; use thiserror::Error; @@ -348,7 +344,12 @@ pub fn received_ibc_token( TracePrefix::new(dest_port_id.clone(), dest_channel_id.clone()); ibc_denom.add_trace_prefix(prefix); } - Ok(storage::ibc_token(ibc_denom.to_string())) + if ibc_denom.trace_path.is_empty() { + Address::decode(ibc_denom.to_string()) + .map_err(|e| Error::Denom(format!("Invalid base denom: {e}"))) + } else { + Ok(storage::ibc_token(ibc_denom.to_string())) + } } #[cfg(any(test, feature = "testing"))] diff --git a/crates/ibc/src/storage.rs b/crates/ibc/src/storage.rs index 2137a6e6fa..c4aafc947d 100644 --- a/crates/ibc/src/storage.rs +++ b/crates/ibc/src/storage.rs @@ -2,6 +2,7 @@ use std::str::FromStr; +use namada_core::address::{Address, InternalAddress, HASH_LEN, SHA_HASH_LEN}; use namada_core::ibc::core::client::types::Height; use namada_core::ibc::core::host::types::identifiers::{ ChannelId, ClientId, ConnectionId, PortId, Sequence, @@ -11,11 +12,8 @@ use namada_core::ibc::core::host::types::path::{ ClientStatePath, CommitmentPath, ConnectionPath, Path, PortPath, ReceiptPath, SeqAckPath, SeqRecvPath, SeqSendPath, }; -use namada_core::types::address::{ - Address, InternalAddress, HASH_LEN, SHA_HASH_LEN, -}; -use 
namada_core::types::ibc::IbcTokenHash; -use namada_core::types::storage::{DbKeySeg, Key, KeySeg}; +use namada_core::ibc::IbcTokenHash; +use namada_core::storage::{DbKeySeg, Key, KeySeg}; use sha2::{Digest, Sha256}; use thiserror::Error; @@ -29,7 +27,7 @@ const DENOM: &str = "ibc_denom"; #[derive(Error, Debug)] pub enum Error { #[error("Storage key error: {0}")] - StorageKey(namada_core::types::storage::Error), + StorageKey(namada_core::storage::Error), #[error("Invalid Key: {0}")] InvalidKey(String), #[error("Port capability error: {0}")] diff --git a/crates/light_sdk/src/reading/asynchronous/account.rs b/crates/light_sdk/src/reading/asynchronous/account.rs index 8648ca7059..24b1c34e13 100644 --- a/crates/light_sdk/src/reading/asynchronous/account.rs +++ b/crates/light_sdk/src/reading/asynchronous/account.rs @@ -1,5 +1,5 @@ use namada_sdk::account::Account; -use namada_sdk::types::key::common; +use namada_sdk::key::common; use super::*; diff --git a/crates/light_sdk/src/reading/asynchronous/mod.rs b/crates/light_sdk/src/reading/asynchronous/mod.rs index 441d88746c..0b40764955 100644 --- a/crates/light_sdk/src/reading/asynchronous/mod.rs +++ b/crates/light_sdk/src/reading/asynchronous/mod.rs @@ -1,13 +1,13 @@ use std::str::FromStr; +use namada_sdk::address::Address; use namada_sdk::error::{EncodingError, Error}; use namada_sdk::io::StdIo; use namada_sdk::queries::RPC; use namada_sdk::rpc; use namada_sdk::state::LastBlock; -use namada_sdk::types::address::Address; -use namada_sdk::types::storage::BlockResults; -use namada_sdk::types::token::{self, DenominatedAmount}; +use namada_sdk::storage::BlockResults; +use namada_sdk::token::{self, DenominatedAmount}; use tendermint_config::net::Address as TendermintAddress; use tendermint_rpc::HttpClient; diff --git a/crates/light_sdk/src/reading/asynchronous/pos.rs b/crates/light_sdk/src/reading/asynchronous/pos.rs index 9c28beb957..9330db274d 100644 --- a/crates/light_sdk/src/reading/asynchronous/pos.rs +++ 
b/crates/light_sdk/src/reading/asynchronous/pos.rs @@ -1,13 +1,12 @@ use std::collections::{BTreeSet, HashMap, HashSet}; +use namada_sdk::key::common; use namada_sdk::proof_of_stake::types::{ BondsAndUnbondsDetails, CommissionPair, ValidatorMetaData, ValidatorState, }; use namada_sdk::proof_of_stake::PosParams; use namada_sdk::queries::vp::pos::EnrichedBondsAndUnbondsDetails; -use namada_sdk::types::address::Address; -use namada_sdk::types::key::common; -use namada_sdk::types::storage::{BlockHeight, Epoch}; +use namada_sdk::storage::{BlockHeight, Epoch}; use super::*; diff --git a/crates/light_sdk/src/reading/blocking/account.rs b/crates/light_sdk/src/reading/blocking/account.rs index 321178e62c..eb5bccc21d 100644 --- a/crates/light_sdk/src/reading/blocking/account.rs +++ b/crates/light_sdk/src/reading/blocking/account.rs @@ -1,5 +1,5 @@ use namada_sdk::account::Account; -use namada_sdk::types::key::common; +use namada_sdk::key::common; use super::*; diff --git a/crates/light_sdk/src/reading/blocking/mod.rs b/crates/light_sdk/src/reading/blocking/mod.rs index 5f7e79ca62..a70b4633e4 100644 --- a/crates/light_sdk/src/reading/blocking/mod.rs +++ b/crates/light_sdk/src/reading/blocking/mod.rs @@ -1,13 +1,13 @@ use std::str::FromStr; +use namada_sdk::address::Address; use namada_sdk::error::{EncodingError, Error}; use namada_sdk::io::StdIo; use namada_sdk::queries::RPC; use namada_sdk::rpc; use namada_sdk::state::LastBlock; -use namada_sdk::types::address::Address; -use namada_sdk::types::storage::BlockResults; -use namada_sdk::types::token::{self, DenominatedAmount}; +use namada_sdk::storage::BlockResults; +use namada_sdk::token::{self, DenominatedAmount}; use tendermint_config::net::Address as TendermintAddress; use tendermint_rpc::HttpClient; use tokio::runtime::Runtime; diff --git a/crates/light_sdk/src/reading/blocking/pos.rs b/crates/light_sdk/src/reading/blocking/pos.rs index a03aad078d..4a070d36f8 100644 --- a/crates/light_sdk/src/reading/blocking/pos.rs +++ 
b/crates/light_sdk/src/reading/blocking/pos.rs @@ -1,13 +1,13 @@ use std::collections::{BTreeSet, HashMap, HashSet}; +use namada_sdk::address::Address; +use namada_sdk::key::common; use namada_sdk::proof_of_stake::types::{ BondsAndUnbondsDetails, CommissionPair, ValidatorMetaData, ValidatorState, }; use namada_sdk::proof_of_stake::PosParams; use namada_sdk::queries::vp::pos::EnrichedBondsAndUnbondsDetails; -use namada_sdk::types::address::Address; -use namada_sdk::types::key::common; -use namada_sdk::types::storage::{BlockHeight, Epoch}; +use namada_sdk::storage::{BlockHeight, Epoch}; use super::*; diff --git a/crates/light_sdk/src/transaction/account.rs b/crates/light_sdk/src/transaction/account.rs index 8f98695ebd..e52435582f 100644 --- a/crates/light_sdk/src/transaction/account.rs +++ b/crates/light_sdk/src/transaction/account.rs @@ -1,10 +1,10 @@ +use namada_sdk::address::Address; +use namada_sdk::hash::Hash; +use namada_sdk::key::common; +use namada_sdk::storage::Epoch; +use namada_sdk::token::DenominatedAmount; use namada_sdk::tx::data::GasLimit; use namada_sdk::tx::{Signature, Tx, TxError}; -use namada_sdk::types::address::Address; -use namada_sdk::types::hash::Hash; -use namada_sdk::types::key::common; -use namada_sdk::types::storage::Epoch; -use namada_sdk::types::token::DenominatedAmount; use super::{attach_fee, attach_fee_signature, GlobalArgs}; use crate::transaction; diff --git a/crates/light_sdk/src/transaction/bridge.rs b/crates/light_sdk/src/transaction/bridge.rs index e365a8ad89..c1e01238c5 100644 --- a/crates/light_sdk/src/transaction/bridge.rs +++ b/crates/light_sdk/src/transaction/bridge.rs @@ -1,11 +1,11 @@ +use namada_sdk::address::Address; +pub use namada_sdk::eth_bridge_pool::{GasFee, TransferToEthereum}; +use namada_sdk::hash::Hash; +use namada_sdk::key::common; +use namada_sdk::storage::Epoch; +use namada_sdk::token::DenominatedAmount; use namada_sdk::tx::data::GasLimit; use namada_sdk::tx::{Signature, Tx, TxError}; -use 
namada_sdk::types::address::Address; -pub use namada_sdk::types::eth_bridge_pool::{GasFee, TransferToEthereum}; -use namada_sdk::types::hash::Hash; -use namada_sdk::types::key::common; -use namada_sdk::types::storage::Epoch; -use namada_sdk::types::token::DenominatedAmount; use super::{attach_fee, attach_fee_signature, GlobalArgs}; use crate::transaction; @@ -23,10 +23,7 @@ impl BridgeTransfer { args: GlobalArgs, ) -> Self { let pending_transfer = - namada_sdk::types::eth_bridge_pool::PendingTransfer { - transfer, - gas_fee, - }; + namada_sdk::eth_bridge_pool::PendingTransfer { transfer, gas_fee }; Self(transaction::build_tx( args, diff --git a/crates/light_sdk/src/transaction/governance.rs b/crates/light_sdk/src/transaction/governance.rs index 60ef066e24..a0ac5e96cc 100644 --- a/crates/light_sdk/src/transaction/governance.rs +++ b/crates/light_sdk/src/transaction/governance.rs @@ -1,11 +1,11 @@ +use namada_sdk::address::Address; use namada_sdk::governance::{ProposalType, ProposalVote}; +use namada_sdk::hash::Hash; +use namada_sdk::key::common; +use namada_sdk::storage::Epoch; +use namada_sdk::token::DenominatedAmount; use namada_sdk::tx::data::GasLimit; use namada_sdk::tx::{Signature, Tx, TxError}; -use namada_sdk::types::address::Address; -use namada_sdk::types::hash::Hash; -use namada_sdk::types::key::common; -use namada_sdk::types::storage::Epoch; -use namada_sdk::types::token::DenominatedAmount; use super::{attach_fee, attach_fee_signature, GlobalArgs}; use crate::transaction; diff --git a/crates/light_sdk/src/transaction/ibc.rs b/crates/light_sdk/src/transaction/ibc.rs index 016817820b..2ecc9b3825 100644 --- a/crates/light_sdk/src/transaction/ibc.rs +++ b/crates/light_sdk/src/transaction/ibc.rs @@ -1,15 +1,15 @@ use std::str::FromStr; +use namada_sdk::address::Address; +use namada_sdk::hash::Hash; pub use namada_sdk::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; use namada_sdk::ibc::primitives::Msg; +use namada_sdk::key::common; +use 
namada_sdk::storage::Epoch; +use namada_sdk::time::DateTimeUtc; +use namada_sdk::token::DenominatedAmount; use namada_sdk::tx::data::GasLimit; use namada_sdk::tx::{Signature, Tx, TxError}; -use namada_sdk::types::address::Address; -use namada_sdk::types::hash::Hash; -use namada_sdk::types::key::common; -use namada_sdk::types::storage::Epoch; -use namada_sdk::types::time::DateTimeUtc; -use namada_sdk::types::token::DenominatedAmount; use super::{attach_fee, attach_fee_signature, GlobalArgs}; use crate::transaction; diff --git a/crates/light_sdk/src/transaction/mod.rs b/crates/light_sdk/src/transaction/mod.rs index cf44997e37..598a665381 100644 --- a/crates/light_sdk/src/transaction/mod.rs +++ b/crates/light_sdk/src/transaction/mod.rs @@ -2,15 +2,15 @@ use std::collections::BTreeMap; use std::str::FromStr; use borsh::BorshSerialize; +use namada_sdk::address::Address; +use namada_sdk::chain::ChainId; +use namada_sdk::hash::Hash; +use namada_sdk::key::common; +use namada_sdk::storage::Epoch; +use namada_sdk::time::DateTimeUtc; +use namada_sdk::token::DenominatedAmount; use namada_sdk::tx::data::{Fee, GasLimit}; use namada_sdk::tx::{Section, Signature, Signer, Tx}; -use namada_sdk::types::address::Address; -use namada_sdk::types::chain::ChainId; -use namada_sdk::types::hash::Hash; -use namada_sdk::types::key::common; -use namada_sdk::types::storage::Epoch; -use namada_sdk::types::time::DateTimeUtc; -use namada_sdk::types::token::DenominatedAmount; pub mod account; pub mod bridge; diff --git a/crates/light_sdk/src/transaction/pgf.rs b/crates/light_sdk/src/transaction/pgf.rs index 572503573d..48afb38a68 100644 --- a/crates/light_sdk/src/transaction/pgf.rs +++ b/crates/light_sdk/src/transaction/pgf.rs @@ -1,13 +1,13 @@ use std::collections::HashMap; +use namada_sdk::address::Address; +use namada_sdk::dec::Dec; +use namada_sdk::hash::Hash; +use namada_sdk::key::common; +use namada_sdk::storage::Epoch; +use namada_sdk::token::DenominatedAmount; use 
namada_sdk::tx::data::GasLimit; use namada_sdk::tx::{Signature, Tx, TxError}; -use namada_sdk::types::address::Address; -use namada_sdk::types::dec::Dec; -use namada_sdk::types::hash::Hash; -use namada_sdk::types::key::common; -use namada_sdk::types::storage::Epoch; -use namada_sdk::types::token::DenominatedAmount; use super::{attach_fee, attach_fee_signature, GlobalArgs}; use crate::transaction; diff --git a/crates/light_sdk/src/transaction/pos.rs b/crates/light_sdk/src/transaction/pos.rs index 5b9ed197cb..39ae504c7a 100644 --- a/crates/light_sdk/src/transaction/pos.rs +++ b/crates/light_sdk/src/transaction/pos.rs @@ -1,13 +1,13 @@ +use namada_sdk::address::Address; +use namada_sdk::dec::Dec; +use namada_sdk::hash::Hash; +use namada_sdk::key::{common, secp256k1}; +use namada_sdk::storage::Epoch; +use namada_sdk::token; +use namada_sdk::token::{Amount, DenominatedAmount}; use namada_sdk::tx::data::pos::Redelegation; use namada_sdk::tx::data::GasLimit; use namada_sdk::tx::{Signature, Tx, TxError}; -use namada_sdk::types::address::Address; -use namada_sdk::types::dec::Dec; -use namada_sdk::types::hash::Hash; -use namada_sdk::types::key::{common, secp256k1}; -use namada_sdk::types::storage::Epoch; -use namada_sdk::types::token; -use namada_sdk::types::token::{Amount, DenominatedAmount}; use super::{attach_fee, attach_fee_signature, GlobalArgs}; use crate::transaction; diff --git a/crates/light_sdk/src/transaction/transfer.rs b/crates/light_sdk/src/transaction/transfer.rs index 1c749819e6..b548665086 100644 --- a/crates/light_sdk/src/transaction/transfer.rs +++ b/crates/light_sdk/src/transaction/transfer.rs @@ -1,11 +1,11 @@ use borsh_ext::BorshSerializeExt; +use namada_sdk::address::Address; +use namada_sdk::hash::Hash; +use namada_sdk::key::common; +use namada_sdk::storage::Epoch; +use namada_sdk::token::DenominatedAmount; use namada_sdk::tx::data::GasLimit; use namada_sdk::tx::{Signature, Tx, TxError}; -use namada_sdk::types::address::Address; -use 
namada_sdk::types::hash::Hash; -use namada_sdk::types::key::common; -use namada_sdk::types::storage::Epoch; -use namada_sdk::types::token::DenominatedAmount; use super::{attach_fee, attach_fee_signature, GlobalArgs}; use crate::transaction; @@ -27,7 +27,7 @@ impl Transfer { shielded: Option, args: GlobalArgs, ) -> Self { - let init_proposal = namada_sdk::types::token::Transfer { + let init_proposal = namada_sdk::token::Transfer { source, target, token, diff --git a/crates/light_sdk/src/transaction/wrapper.rs b/crates/light_sdk/src/transaction/wrapper.rs index 8f396cb144..fbc1ea953e 100644 --- a/crates/light_sdk/src/transaction/wrapper.rs +++ b/crates/light_sdk/src/transaction/wrapper.rs @@ -1,8 +1,8 @@ +use namada_sdk::hash::Hash; +use namada_sdk::key::common; +use namada_sdk::storage::Epoch; use namada_sdk::tx::data::{Fee, GasLimit}; use namada_sdk::tx::{Section, Signature, Signer, Tx, TxError}; -use namada_sdk::types::hash::Hash; -use namada_sdk::types::key::common; -use namada_sdk::types::storage::Epoch; #[allow(missing_docs)] pub struct Wrapper(Tx); diff --git a/crates/macros/src/lib.rs b/crates/macros/src/lib.rs index c3af2fd82f..a328ed250c 100644 --- a/crates/macros/src/lib.rs +++ b/crates/macros/src/lib.rs @@ -269,10 +269,10 @@ fn derive_storage_keys_inner(struct_def: TokenStream2) -> TokenStream2 { let id = syn::Ident::new(&id, ident.span()); quote! 
{ #[allow(missing_docs)] - pub fn #id(key: &namada_core::types::storage::Key, address: &Address) -> bool { + pub fn #id(key: &namada_core::storage::Key, address: &Address) -> bool { matches!(&key.segments[..], [ - namada_core::types::storage::DbKeySeg::AddressSeg(a), - namada_core::types::storage::DbKeySeg::StringSeg(#ident), + namada_core::storage::DbKeySeg::AddressSeg(a), + namada_core::storage::DbKeySeg::StringSeg(#ident), ] if a == address && #ident == #struct_def_ident::VALUES.#ident) } } @@ -282,11 +282,11 @@ fn derive_storage_keys_inner(struct_def: TokenStream2) -> TokenStream2 { let id = syn::Ident::new(&id, ident.span()); quote! { #[allow(missing_docs)] - pub fn #id(address: Address) -> namada_core::types::storage::Key { - namada_core::types::storage::Key { + pub fn #id(address: Address) -> namada_core::storage::Key { + namada_core::storage::Key { segments: vec![ - namada_core::types::storage::DbKeySeg::AddressSeg(address), - namada_core::types::storage::DbKeySeg::StringSeg(#struct_def_ident::VALUES.#ident.to_string()), + namada_core::storage::DbKeySeg::AddressSeg(address), + namada_core::storage::DbKeySeg::StringSeg(#struct_def_ident::VALUES.#ident.to_string()), ], } } @@ -300,7 +300,7 @@ fn derive_storage_keys_inner(struct_def: TokenStream2) -> TokenStream2 { quote! { impl #struct_def_ident { /// A list of all storage keys - pub const ALL: &[&'static str] = { + pub const ALL: &'static [&'static str] = { let #struct_def_ident { #ident_list } = Self::VALUES; @@ -389,7 +389,7 @@ mod test_proc_macros { let expected_impl = quote! 
{ impl Keys { /// A list of all storage keys - pub const ALL: &[&'static str] = { + pub const ALL: &'static [&'static str] = { let Keys { bird, is, the, word } = Self::VALUES; &[bird, is, the, word] }; @@ -403,66 +403,66 @@ mod test_proc_macros { }; } #[allow(missing_docs)] - pub fn is_bird_key_at_addr(key: &namada_core::types::storage::Key, address: &Address) -> bool { + pub fn is_bird_key_at_addr(key: &namada_core::storage::Key, address: &Address) -> bool { matches!(&key.segments[..], [ - namada_core::types::storage::DbKeySeg::AddressSeg(a), - namada_core::types::storage::DbKeySeg::StringSeg(bird), + namada_core::storage::DbKeySeg::AddressSeg(a), + namada_core::storage::DbKeySeg::StringSeg(bird), ] if a == address && bird == Keys::VALUES.bird) } #[allow(missing_docs)] - pub fn get_bird_key_at_addr(address: Address) -> namada_core::types::storage::Key { - namada_core::types::storage::Key { + pub fn get_bird_key_at_addr(address: Address) -> namada_core::storage::Key { + namada_core::storage::Key { segments: vec![ - namada_core::types::storage::DbKeySeg::AddressSeg(address), - namada_core::types::storage::DbKeySeg::StringSeg(Keys::VALUES.bird.to_string()), + namada_core::storage::DbKeySeg::AddressSeg(address), + namada_core::storage::DbKeySeg::StringSeg(Keys::VALUES.bird.to_string()), ], } } #[allow(missing_docs)] - pub fn is_is_key_at_addr(key: &namada_core::types::storage::Key, address: &Address) -> bool { + pub fn is_is_key_at_addr(key: &namada_core::storage::Key, address: &Address) -> bool { matches!(&key.segments[..], [ - namada_core::types::storage::DbKeySeg::AddressSeg(a), - namada_core::types::storage::DbKeySeg::StringSeg(is), + namada_core::storage::DbKeySeg::AddressSeg(a), + namada_core::storage::DbKeySeg::StringSeg(is), ] if a == address && is == Keys::VALUES.is) } #[allow(missing_docs)] - pub fn get_is_key_at_addr(address: Address) -> namada_core::types::storage::Key { - namada_core::types::storage::Key { + pub fn get_is_key_at_addr(address: Address) -> 
namada_core::storage::Key { + namada_core::storage::Key { segments: vec![ - namada_core::types::storage::DbKeySeg::AddressSeg(address), - namada_core::types::storage::DbKeySeg::StringSeg(Keys::VALUES.is.to_string()), + namada_core::storage::DbKeySeg::AddressSeg(address), + namada_core::storage::DbKeySeg::StringSeg(Keys::VALUES.is.to_string()), ], } } #[allow(missing_docs)] - pub fn is_the_key_at_addr(key: &namada_core::types::storage::Key, address: &Address) -> bool { + pub fn is_the_key_at_addr(key: &namada_core::storage::Key, address: &Address) -> bool { matches!(&key.segments[..], [ - namada_core::types::storage::DbKeySeg::AddressSeg(a), - namada_core::types::storage::DbKeySeg::StringSeg(the), + namada_core::storage::DbKeySeg::AddressSeg(a), + namada_core::storage::DbKeySeg::StringSeg(the), ] if a == address && the == Keys::VALUES.the) } #[allow(missing_docs)] - pub fn get_the_key_at_addr(address: Address) -> namada_core::types::storage::Key { - namada_core::types::storage::Key { + pub fn get_the_key_at_addr(address: Address) -> namada_core::storage::Key { + namada_core::storage::Key { segments: vec![ - namada_core::types::storage::DbKeySeg::AddressSeg(address), - namada_core::types::storage::DbKeySeg::StringSeg(Keys::VALUES.the.to_string()), + namada_core::storage::DbKeySeg::AddressSeg(address), + namada_core::storage::DbKeySeg::StringSeg(Keys::VALUES.the.to_string()), ], } } #[allow(missing_docs)] - pub fn is_word_key_at_addr(key: &namada_core::types::storage::Key, address: &Address) -> bool { + pub fn is_word_key_at_addr(key: &namada_core::storage::Key, address: &Address) -> bool { matches!(&key.segments[..], [ - namada_core::types::storage::DbKeySeg::AddressSeg(a), - namada_core::types::storage::DbKeySeg::StringSeg(word), + namada_core::storage::DbKeySeg::AddressSeg(a), + namada_core::storage::DbKeySeg::StringSeg(word), ] if a == address && word == Keys::VALUES.word) } #[allow(missing_docs)] - pub fn get_word_key_at_addr(address: Address) -> 
namada_core::types::storage::Key { - namada_core::types::storage::Key { + pub fn get_word_key_at_addr(address: Address) -> namada_core::storage::Key { + namada_core::storage::Key { segments: vec![ - namada_core::types::storage::DbKeySeg::AddressSeg(address), - namada_core::types::storage::DbKeySeg::StringSeg(Keys::VALUES.word.to_string()), + namada_core::storage::DbKeySeg::AddressSeg(address), + namada_core::storage::DbKeySeg::StringSeg(Keys::VALUES.word.to_string()), ], } } @@ -520,7 +520,7 @@ mod test_proc_macros { let expected_impl = quote! { impl Keys { /// A list of all storage keys - pub const ALL: &[&'static str] = { + pub const ALL: &'static [&'static str] = { let Keys { param1, param2 } = Self::VALUES; &[param1, param2] }; @@ -531,34 +531,34 @@ mod test_proc_macros { }; } #[allow(missing_docs)] - pub fn is_param1_key_at_addr(key: &namada_core::types::storage::Key, address: &Address) -> bool { + pub fn is_param1_key_at_addr(key: &namada_core::storage::Key, address: &Address) -> bool { matches!(&key.segments[..], [ - namada_core::types::storage::DbKeySeg::AddressSeg(a), - namada_core::types::storage::DbKeySeg::StringSeg(param1), + namada_core::storage::DbKeySeg::AddressSeg(a), + namada_core::storage::DbKeySeg::StringSeg(param1), ] if a == address && param1 == Keys::VALUES.param1) } #[allow(missing_docs)] - pub fn get_param1_key_at_addr(address: Address) -> namada_core::types::storage::Key { - namada_core::types::storage::Key { + pub fn get_param1_key_at_addr(address: Address) -> namada_core::storage::Key { + namada_core::storage::Key { segments: vec![ - namada_core::types::storage::DbKeySeg::AddressSeg(address), - namada_core::types::storage::DbKeySeg::StringSeg(Keys::VALUES.param1.to_string()), + namada_core::storage::DbKeySeg::AddressSeg(address), + namada_core::storage::DbKeySeg::StringSeg(Keys::VALUES.param1.to_string()), ], } } #[allow(missing_docs)] - pub fn is_param2_key_at_addr(key: &namada_core::types::storage::Key, address: &Address) -> bool { + 
pub fn is_param2_key_at_addr(key: &namada_core::storage::Key, address: &Address) -> bool { matches!(&key.segments[..], [ - namada_core::types::storage::DbKeySeg::AddressSeg(a), - namada_core::types::storage::DbKeySeg::StringSeg(param2), + namada_core::storage::DbKeySeg::AddressSeg(a), + namada_core::storage::DbKeySeg::StringSeg(param2), ] if a == address && param2 == Keys::VALUES.param2) } #[allow(missing_docs)] - pub fn get_param2_key_at_addr(address: Address) -> namada_core::types::storage::Key { - namada_core::types::storage::Key { + pub fn get_param2_key_at_addr(address: Address) -> namada_core::storage::Key { + namada_core::storage::Key { segments: vec![ - namada_core::types::storage::DbKeySeg::AddressSeg(address), - namada_core::types::storage::DbKeySeg::StringSeg(Keys::VALUES.param2.to_string()), + namada_core::storage::DbKeySeg::AddressSeg(address), + namada_core::storage::DbKeySeg::StringSeg(Keys::VALUES.param2.to_string()), ], } } diff --git a/crates/merkle_tree/Cargo.toml b/crates/merkle_tree/Cargo.toml index 329389a0d8..4fc284deb9 100644 --- a/crates/merkle_tree/Cargo.toml +++ b/crates/merkle_tree/Cargo.toml @@ -23,6 +23,8 @@ prost.workspace = true thiserror.workspace = true [dev-dependencies] +namada_core = { path = "../core", features = ["testing"] } + assert_matches.workspace = true proptest.workspace = true itertools.workspace = true diff --git a/crates/merkle_tree/src/eth_bridge_pool.rs b/crates/merkle_tree/src/eth_bridge_pool.rs index b8c30a36aa..12f0e9303c 100644 --- a/crates/merkle_tree/src/eth_bridge_pool.rs +++ b/crates/merkle_tree/src/eth_bridge_pool.rs @@ -4,12 +4,12 @@ use std::collections::{BTreeMap, BTreeSet}; use eyre::eyre; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use namada_core::types::eth_abi::{Encode, Token}; -use namada_core::types::eth_bridge_pool::PendingTransfer; -use namada_core::types::hash::Hash; -use namada_core::types::keccak::{keccak_hash, KeccakHash}; -use namada_core::types::storage; -use 
namada_core::types::storage::{BlockHeight, DbKeySeg}; +use namada_core::eth_abi::{Encode, Token}; +use namada_core::eth_bridge_pool::PendingTransfer; +use namada_core::hash::Hash; +use namada_core::keccak::{keccak_hash, KeccakHash}; +use namada_core::storage; +use namada_core::storage::{BlockHeight, DbKeySeg}; #[derive(thiserror::Error, Debug)] #[error(transparent)] @@ -373,12 +373,13 @@ mod test_bridge_pool_tree { use assert_matches::assert_matches; use itertools::Itertools; - use namada_core::types::address::{nam, Address}; - use namada_core::types::eth_bridge_pool::{ + use namada_core::address::testing::nam; + use namada_core::address::Address; + use namada_core::eth_bridge_pool::{ GasFee, TransferToEthereum, TransferToEthereumKind, }; - use namada_core::types::ethereum_events::EthAddress; - use namada_core::types::storage::Key; + use namada_core::ethereum_events::EthAddress; + use namada_core::storage::Key; use proptest::prelude::*; use super::*; diff --git a/crates/merkle_tree/src/ics23_specs.rs b/crates/merkle_tree/src/ics23_specs.rs index 130735d33a..c907b6b71b 100644 --- a/crates/merkle_tree/src/ics23_specs.rs +++ b/crates/merkle_tree/src/ics23_specs.rs @@ -2,7 +2,7 @@ use arse_merkle_tree::H256; use ics23::{HashOp, LeafOp, LengthOp, ProofSpec}; -use namada_core::types::hash::StorageHasher; +use namada_core::hash::StorageHasher; /// Get the leaf spec for the base tree. The key is stored after hashing, /// but the stored value is the subtree's root without hashing. 
diff --git a/crates/merkle_tree/src/lib.rs b/crates/merkle_tree/src/lib.rs index eb55878a82..45fd45653c 100644 --- a/crates/merkle_tree/src/lib.rs +++ b/crates/merkle_tree/src/lib.rs @@ -15,19 +15,17 @@ use eth_bridge_pool::{BridgePoolProof, BridgePoolTree}; use ics23::commitment_proof::Proof as Ics23Proof; use ics23::{CommitmentProof, ExistenceProof, NonExistenceProof}; use ics23_specs::ibc_leaf_spec; +use namada_core::address::{Address, InternalAddress}; use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; use namada_core::bytes::ByteBuf; -use namada_core::types::address::{Address, InternalAddress}; -use namada_core::types::eth_bridge_pool::{ - is_pending_transfer_key, PendingTransfer, -}; -use namada_core::types::hash::{Hash, StorageHasher}; -use namada_core::types::keccak::KeccakHash; -use namada_core::types::storage::{ +use namada_core::eth_bridge_pool::{is_pending_transfer_key, PendingTransfer}; +use namada_core::hash::{Hash, StorageHasher}; +use namada_core::keccak::KeccakHash; +use namada_core::storage::{ self, BlockHeight, DbKeySeg, Epoch, Error as StorageError, Key, KeySeg, StringKey, TreeBytes, TreeKeyError, IBC_KEY_LIMIT, }; -use namada_core::types::{self, DecodeError}; +use namada_core::{decode, DecodeError}; use thiserror::Error; /// Trait for reading from a merkle tree that is a sub-tree @@ -257,7 +255,7 @@ impl StoreType { if key.is_empty() { return Err(Error::EmptyKey("the key is empty".to_owned())); } - match key.segments.get(0) { + match key.segments.first() { Some(DbKeySeg::AddressSeg(Address::Internal(internal))) => { match internal { InternalAddress::PoS | InternalAddress::PosSlashPool => { @@ -303,11 +301,11 @@ impl StoreType { bytes: T, ) -> std::result::Result { match self { - Self::Base => Ok(Store::Base(types::decode(bytes)?)), - Self::Account => Ok(Store::Account(types::decode(bytes)?)), - Self::Ibc => Ok(Store::Ibc(types::decode(bytes)?)), - Self::PoS => Ok(Store::PoS(types::decode(bytes)?)), - Self::BridgePool 
=> Ok(Store::BridgePool(types::decode(bytes)?)), + Self::Base => Ok(Store::Base(decode(bytes)?)), + Self::Account => Ok(Store::Account(decode(bytes)?)), + Self::Ibc => Ok(Store::Ibc(decode(bytes)?)), + Self::PoS => Ok(Store::PoS(decode(bytes)?)), + Self::BridgePool => Ok(Store::BridgePool(decode(bytes)?)), } } } @@ -1004,8 +1002,7 @@ impl<'a> SubTreeWrite for &'a mut BridgePoolTree { #[cfg(test)] mod test { use ics23::HostFunctionsManager; - use namada_core::types::hash::Sha256Hasher; - use namada_core::types::storage::KeySeg; + use namada_core::hash::Sha256Hasher; use super::*; use crate::ics23_specs::{ibc_proof_specs, proof_specs}; @@ -1150,7 +1147,7 @@ mod test { }; let proof = tree.get_sub_tree_proof(&ibc_key, proof).unwrap(); let (store_type, sub_key) = StoreType::sub_key(&ibc_key).unwrap(); - let paths = vec![sub_key.to_string(), store_type.to_string()]; + let paths = [sub_key.to_string(), store_type.to_string()]; let mut sub_root = ibc_val.clone(); let mut value = ibc_val; // First, the sub proof is verified. Next the base proof is verified @@ -1214,7 +1211,7 @@ mod test { let proof = tree.get_sub_tree_proof(&pos_key, proof).unwrap(); let (store_type, sub_key) = StoreType::sub_key(&pos_key).unwrap(); - let paths = vec![sub_key.to_string(), store_type.to_string()]; + let paths = [sub_key.to_string(), store_type.to_string()]; let mut sub_root = pos_val.clone(); let mut value = pos_val; // First, the sub proof is verified. 
Next the base proof is verified diff --git a/crates/namada/Cargo.toml b/crates/namada/Cargo.toml index 3026aff756..b2d9681245 100644 --- a/crates/namada/Cargo.toml +++ b/crates/namada/Cargo.toml @@ -83,6 +83,7 @@ namada_governance = { path = "../governance" } namada_ibc = { path = "../ibc" } namada_parameters = { path = "../parameters" } namada_proof_of_stake = { path = "../proof_of_stake" } +namada_replay_protection = { path = "../replay_protection" } namada_sdk = { path = "../sdk", default-features = false } namada_state = { path = "../state" } namada_token = { path = "../token" } diff --git a/crates/namada/src/ledger/governance/mod.rs b/crates/namada/src/ledger/governance/mod.rs index 9618ecf989..3ab335f8bb 100644 --- a/crates/namada/src/ledger/governance/mod.rs +++ b/crates/namada/src/ledger/governance/mod.rs @@ -13,17 +13,17 @@ use namada_governance::utils::is_valid_validator_voting_period; use namada_governance::ProposalVote; use namada_proof_of_stake::is_validator; use namada_proof_of_stake::queries::find_delegations; -use namada_state::StorageRead; +use namada_state::{StateRead, StorageRead}; use namada_tx::Tx; use namada_vp_env::VpEnv; use thiserror::Error; use self::utils::ReadType; +use crate::address::{Address, InternalAddress}; use crate::ledger::native_vp::{Ctx, NativeVp}; use crate::ledger::{native_vp, pos}; +use crate::storage::{Epoch, Key}; use crate::token; -use crate::types::address::{Address, InternalAddress}; -use crate::types::storage::{Epoch, Key}; use crate::vm::WasmCacheAccess; /// for handling Governance NativeVP errors @@ -47,20 +47,18 @@ pub enum Error { } /// Governance VP -pub struct GovernanceVp<'a, DB, H, CA> +pub struct GovernanceVp<'a, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: namada_state::StorageHasher, + S: StateRead, CA: WasmCacheAccess, { /// Context to interact with the host structures. 
- pub ctx: Ctx<'a, DB, H, CA>, + pub ctx: Ctx<'a, S, CA>, } -impl<'a, DB, H, CA> NativeVp for GovernanceVp<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for GovernanceVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -134,10 +132,9 @@ where } } -impl<'a, DB, H, CA> GovernanceVp<'a, DB, H, CA> +impl<'a, S, CA> GovernanceVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { fn is_valid_init_proposal_key_set( @@ -711,8 +708,7 @@ where delegation_address: &Address, ) -> Result where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { if !address.eq(delegation_address) { diff --git a/crates/namada/src/ledger/ibc/mod.rs b/crates/namada/src/ledger/ibc/mod.rs index 45f987e7a8..d8f89952fb 100644 --- a/crates/namada/src/ledger/ibc/mod.rs +++ b/crates/namada/src/ledger/ibc/mod.rs @@ -4,13 +4,12 @@ pub use namada_ibc::storage; use namada_ibc::storage::{ channel_counter_key, client_counter_key, connection_counter_key, }; -use namada_state::{StorageHasher, StorageWrite, WlStorage}; +use namada_state::State; /// Initialize storage in the genesis block. 
-pub fn init_genesis_storage(storage: &mut WlStorage) +pub fn init_genesis_storage(storage: &mut S) where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: State, { // In ibc-go, u64 like a counter is encoded with big-endian: // https://github.com/cosmos/ibc-go/blob/89ffaafb5956a5ea606e1f1bf249c880bea802ed/modules/core/04-channel/keeper/keeper.go#L115 diff --git a/crates/namada/src/ledger/mod.rs b/crates/namada/src/ledger/mod.rs index 5f54de80f2..ed59846e93 100644 --- a/crates/namada/src/ledger/mod.rs +++ b/crates/namada/src/ledger/mod.rs @@ -14,7 +14,6 @@ pub mod vp_host_fns; #[cfg(feature = "wasm-runtime")] pub use dry_run_tx::dry_run_tx; -pub use namada_core::ledger::replay_protection; pub use { namada_gas as gas, namada_parameters as parameters, namada_tx_env as tx_env, namada_vp_env as vp_env, @@ -22,6 +21,8 @@ pub use { #[cfg(feature = "wasm-runtime")] mod dry_run_tx { + use std::cell::RefCell; + use namada_sdk::queries::{EncodedResponseQuery, RequestCtx, RequestQuery}; use namada_state::{DBIter, ResultExt, StorageHasher, DB}; use namada_tx::data::GasLimit; @@ -31,8 +32,8 @@ mod dry_run_tx { use crate::vm::WasmCacheAccess; /// Dry run a transaction - pub fn dry_run_tx( - mut ctx: RequestCtx<'_, D, H, VpCache, TxCache>, + pub fn dry_run_tx<'a, D, H, CA>( + mut ctx: RequestCtx<'a, D, H, VpCache, TxCache>, request: &RequestQuery, ) -> namada_state::StorageResult where @@ -42,32 +43,31 @@ mod dry_run_tx { { use borsh_ext::BorshSerializeExt; use namada_gas::{Gas, GasMetering, TxGasMeter}; - use namada_state::TempWlStorage; use namada_tx::data::{DecryptedTx, TxType}; use namada_tx::Tx; use crate::ledger::protocol::ShellParams; - use crate::types::storage::TxIndex; + use crate::storage::TxIndex; + let mut temp_state = ctx.state.with_temp_write_log(); let mut tx = Tx::try_from(&request.data[..]).into_storage_result()?; tx.validate_tx().into_storage_result()?; - let mut temp_wl_storage = 
TempWlStorage::new(&ctx.wl_storage.storage); let mut cumulated_gas = Gas::default(); // Wrapper dry run to allow estimating the gas cost of a transaction - let mut tx_gas_meter = match tx.header().tx_type { + let tx_gas_meter = match tx.header().tx_type { TxType::Wrapper(wrapper) => { - let mut tx_gas_meter = - TxGasMeter::new(wrapper.gas_limit.to_owned()); + let tx_gas_meter = + RefCell::new(TxGasMeter::new(wrapper.gas_limit.to_owned())); protocol::apply_wrapper_tx( tx.clone(), &wrapper, None, &request.data, ShellParams::new( - &mut tx_gas_meter, - &mut temp_wl_storage, + &tx_gas_meter, + &mut temp_state, &mut ctx.vp_wasm_cache, &mut ctx.tx_wasm_cache, ), @@ -75,53 +75,53 @@ mod dry_run_tx { ) .into_storage_result()?; - temp_wl_storage.write_log.commit_tx(); - cumulated_gas = tx_gas_meter.get_tx_consumed_gas(); + temp_state.write_log_mut().commit_tx(); + cumulated_gas = tx_gas_meter.borrow_mut().get_tx_consumed_gas(); tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - TxGasMeter::new_from_sub_limit(tx_gas_meter.get_available_gas()) + let available_gas = tx_gas_meter.borrow().get_available_gas(); + TxGasMeter::new_from_sub_limit(available_gas) } TxType::Protocol(_) | TxType::Decrypted(_) => { - // If dry run only the inner tx, use the max block gas as the - // gas limit + // If dry run only the inner tx, use the max block gas as + // the gas limit TxGasMeter::new(GasLimit::from( - namada_parameters::get_max_block_gas(ctx.wl_storage) - .unwrap(), + namada_parameters::get_max_block_gas(ctx.state).unwrap(), )) } TxType::Raw => { // Cast tx to a decrypted for execution tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - // If dry run only the inner tx, use the max block gas as the - // gas limit + // If dry run only the inner tx, use the max block gas as + // the gas limit TxGasMeter::new(GasLimit::from( - namada_parameters::get_max_block_gas(ctx.wl_storage) - .unwrap(), + namada_parameters::get_max_block_gas(ctx.state).unwrap(), )) } }; + let 
tx_gas_meter = RefCell::new(tx_gas_meter); let mut data = protocol::apply_wasm_tx( tx, &TxIndex(0), ShellParams::new( - &mut tx_gas_meter, - &mut temp_wl_storage, + &tx_gas_meter, + &mut temp_state, &mut ctx.vp_wasm_cache, &mut ctx.tx_wasm_cache, ), ) .into_storage_result()?; cumulated_gas = cumulated_gas - .checked_add(tx_gas_meter.get_tx_consumed_gas()) + .checked_add(tx_gas_meter.borrow().get_tx_consumed_gas()) .ok_or(namada_state::StorageError::SimpleMessage( "Overflow in gas", ))?; // Account gas for both inner and wrapper (if available) data.gas_used = cumulated_gas; - // NOTE: the keys changed by the wrapper transaction (if any) are not - // returned from this function + // NOTE: the keys changed by the wrapper transaction (if any) are + // not returned from this function let data = data.serialize_to_vec(); Ok(EncodedResponseQuery { data, @@ -135,14 +135,14 @@ mod dry_run_tx { mod test { use borsh::BorshDeserialize; use borsh_ext::BorshSerializeExt; - use namada_core::types::address; - use namada_core::types::hash::Hash; - use namada_core::types::storage::{BlockHeight, Key}; + use namada_core::address; + use namada_core::hash::Hash; + use namada_core::storage::{BlockHeight, Key}; use namada_sdk::queries::{ EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, }; - use namada_sdk::tendermint_rpc::{self, Error as RpcError, Response}; - use namada_state::testing::TestWlStorage; + use namada_sdk::tendermint_rpc::{Error as RpcError, Response}; + use namada_state::testing::TestState; use namada_state::StorageWrite; use namada_test_utils::TestWasms; use namada_tx::data::decrypted::DecryptedTx; @@ -163,8 +163,8 @@ mod test { { /// RPC router pub rpc: RPC, - /// storage - pub wl_storage: TestWlStorage, + /// state + pub state: TestState, /// event log pub event_log: EventLog, /// VP wasm compilation cache @@ -185,17 +185,13 @@ mod test { /// Initialize a test client for the given root RPC router pub fn new(rpc: RPC) -> Self { // Initialize the `TestClient` - 
let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); // Initialize mock gas limit let max_block_gas_key = namada_parameters::storage::get_max_block_gas_key(); - wl_storage - .storage - .write( - &max_block_gas_key, - namada_core::types::encode(&20_000_000_u64), - ) + state + .db_write(&max_block_gas_key, 20_000_000_u64.serialize_to_vec()) .expect( "Max block gas parameter must be initialized in storage", ); @@ -206,7 +202,7 @@ mod test { wasm::compilation_cache::common::testing::cache(); Self { rpc, - wl_storage, + state, event_log, vp_wasm_cache: vp_wasm_cache.read_only(), tx_wasm_cache: tx_wasm_cache.read_only(), @@ -242,7 +238,7 @@ mod test { prove, }; let ctx = RequestCtx { - wl_storage: &self.wl_storage, + state: &self.state, event_log: &self.event_log, vp_wasm_cache: self.vp_wasm_cache.clone(), tx_wasm_cache: self.tx_wasm_cache.clone(), @@ -262,7 +258,7 @@ mod test { async fn perform(&self, _request: R) -> Result where - R: tendermint_rpc::SimpleRequest, + R: namada_sdk::tendermint_rpc::SimpleRequest, { Ok(R::Response::from_string("TODO").unwrap().into()) } @@ -278,22 +274,21 @@ mod test { let tx_hash = Hash::sha256(&tx_no_op); let key = Key::wasm_code(&tx_hash); let len_key = Key::wasm_code_len(&tx_hash); - client.wl_storage.storage.write(&key, &tx_no_op).unwrap(); + client.state.db_write(&key, &tx_no_op).unwrap(); client - .wl_storage - .storage - .write(&len_key, (tx_no_op.len() as u64).serialize_to_vec()) + .state + .db_write(&len_key, (tx_no_op.len() as u64).serialize_to_vec()) .unwrap(); // Request last committed epoch let read_epoch = RPC.shell().epoch(&client).await.unwrap(); - let current_epoch = client.wl_storage.storage.last_epoch; + let current_epoch = client.state.in_mem().last_epoch; assert_eq!(current_epoch, read_epoch); // Request dry run tx let mut outer_tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); - outer_tx.header.chain_id = client.wl_storage.storage.chain_id.clone(); + 
outer_tx.header.chain_id = client.state.in_mem().chain_id.clone(); outer_tx.set_code(Code::from_hash(tx_hash, None)); outer_tx.set_data(Data::new(vec![])); let tx_bytes = outer_tx.to_bytes(); @@ -335,10 +330,10 @@ mod test { // Then write some balance ... let balance = token::Amount::native_whole(1000); - StorageWrite::write(&mut client.wl_storage, &balance_key, balance)?; + StorageWrite::write(&mut client.state, &balance_key, balance)?; // It has to be committed to be visible in a query - client.wl_storage.commit_tx(); - client.wl_storage.commit_block().unwrap(); + client.state.commit_tx(); + client.state.commit_block().unwrap(); // ... there should be the same value now let read_balance = RPC .shell() diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs index 19e99a7475..331252256e 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs @@ -13,28 +13,29 @@ use std::borrow::Cow; use std::collections::BTreeSet; +use std::fmt::Debug; use std::marker::PhantomData; use borsh::BorshDeserialize; use eyre::eyre; +use namada_core::eth_bridge_pool::erc20_token_address; use namada_core::hints; -use namada_core::types::eth_bridge_pool::erc20_token_address; use namada_ethereum_bridge::storage::bridge_pool::{ get_pending_key, is_bridge_pool_key, BRIDGE_POOL_ADDRESS, }; use namada_ethereum_bridge::storage::parameters::read_native_erc20_address; use namada_ethereum_bridge::storage::whitelist; use namada_ethereum_bridge::ADDRESS as BRIDGE_ADDRESS; -use namada_state::{DBIter, StorageHasher, DB}; +use namada_state::StateRead; use namada_tx::Tx; +use crate::address::{Address, InternalAddress}; +use crate::eth_bridge_pool::{PendingTransfer, TransferToEthereumKind}; +use crate::ethereum_events::EthAddress; use crate::ledger::native_vp::{Ctx, NativeVp, StorageReader}; +use 
crate::storage::Key; use crate::token::storage_key::balance_key; use crate::token::Amount; -use crate::types::address::{Address, InternalAddress}; -use crate::types::eth_bridge_pool::{PendingTransfer, TransferToEthereumKind}; -use crate::types::ethereum_events::EthAddress; -use crate::types::storage::Key; use crate::vm::WasmCacheAccess; #[derive(thiserror::Error, Debug)] @@ -70,20 +71,18 @@ impl AmountDelta { } /// Validity predicate for the Ethereum bridge -pub struct BridgePoolVp<'ctx, D, H, CA> +pub struct BridgePoolVp<'ctx, S, CA> where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Context to interact with the host structures. - pub ctx: Ctx<'ctx, D, H, CA>, + pub ctx: Ctx<'ctx, S, CA>, } -impl<'a, D, H, CA> BridgePoolVp<'a, D, H, CA> +impl<'a, S, CA> BridgePoolVp<'a, S, CA> where - D: 'static + DB + for<'iter> DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Get the change in the balance of an account @@ -334,7 +333,7 @@ where let same_sender_and_fee_payer = transfer.gas_fee.payer == transfer.transfer.sender; let gas_is_native_asset = - transfer.gas_fee.token == self.ctx.storage.native_token; + transfer.gas_fee.token == self.ctx.state.in_mem().native_token; let gas_and_token_is_native_asset = gas_is_native_asset && tok_is_native_asset; let same_token_and_gas_asset = @@ -366,7 +365,7 @@ where { // when minting wrapped NAM on Ethereum, escrow to the Ethereum // bridge address, and draw from NAM token accounts - let token = Cow::Borrowed(&self.ctx.storage.native_token); + let token = Cow::Borrowed(&self.ctx.state.in_mem().native_token); let escrow_account = &BRIDGE_ADDRESS; (token, escrow_account) } else { @@ -518,10 +517,9 @@ fn sum_gas_and_token_amounts( }) } -impl<'a, D, H, CA> NativeVp for BridgePoolVp<'a, D, H, CA> +impl<'a, S, CA> NativeVp for BridgePoolVp<'a, S, CA> where - D: 'static + DB + for<'iter> DBIter<'iter>, - H: 'static + StorageHasher, 
+ S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -639,30 +637,28 @@ where #[cfg(test)] mod test_bridge_pool_vp { + use std::cell::RefCell; use std::env::temp_dir; - use borsh::BorshDeserialize; use namada_core::borsh::BorshSerializeExt; - use namada_core::types::address; + use namada_core::validity_predicate::VpSentinel; use namada_ethereum_bridge::storage::bridge_pool::get_signed_root_key; use namada_ethereum_bridge::storage::parameters::{ Contracts, EthereumBridgeParams, UpgradeableContract, }; use namada_ethereum_bridge::storage::wrapped_erc20s; use namada_gas::TxGasMeter; + use namada_state::testing::TestState; use namada_state::StorageWrite; use namada_tx::data::TxType; use super::*; + use crate::address::testing::{nam, wnam}; + use crate::eth_bridge_pool::{GasFee, TransferToEthereum}; + use crate::hash::Hash; use crate::ledger::gas::VpGasMeter; - use crate::state::mockdb::MockDB; use crate::state::write_log::WriteLog; - use crate::state::{Sha256Hasher, State, WlStorage}; - use crate::types::address::{nam, wnam, InternalAddress}; - use crate::types::chain::ChainId; - use crate::types::eth_bridge_pool::{GasFee, TransferToEthereum}; - use crate::types::hash::Hash; - use crate::types::storage::TxIndex; + use crate::storage::TxIndex; use crate::vm::wasm::VpCache; use crate::vm::WasmCacheRwAccess; @@ -714,7 +710,7 @@ mod test_bridge_pool_vp { /// An implicit user address for testing & development #[allow(dead_code)] pub fn daewon_address() -> Address { - use crate::types::key::*; + use crate::key::*; pub fn daewon_keypair() -> common::SecretKey { let bytes = [ 235, 250, 15, 1, 145, 250, 172, 218, 247, 27, 63, 212, 60, 47, @@ -751,16 +747,16 @@ mod test_bridge_pool_vp { } } - /// Create a writelog representing storage before a transfer is added to the - /// pool. - fn new_writelog() -> WriteLog { - let mut writelog = WriteLog::default(); + /// Create a write-log representing storage before a transfer is added to + /// the pool. 
+ fn new_write_log(write_log: &mut WriteLog) { + *write_log = WriteLog::default(); // setup the initial bridge pool storage - writelog + write_log .write(&get_signed_root_key(), Hash([0; 32]).serialize_to_vec()) .expect("Test failed"); let transfer = initial_pool(); - writelog + write_log .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .expect("Test failed"); // whitelist wnam @@ -769,7 +765,7 @@ mod test_bridge_pool_vp { suffix: whitelist::KeyType::Whitelisted, } .into(); - writelog + write_log .write(&key, true.serialize_to_vec()) .expect("Test failed"); let key = whitelist::Key { @@ -777,45 +773,44 @@ mod test_bridge_pool_vp { suffix: whitelist::KeyType::Cap, } .into(); - writelog + write_log .write(&key, Amount::max().serialize_to_vec()) .expect("Test failed"); // set up users with ERC20 and NUT balances update_balances( - &mut writelog, + write_log, Balance::new(TransferToEthereumKind::Erc20, bertha_address()), SignedAmount::Positive(BERTHA_WEALTH.into()), SignedAmount::Positive(BERTHA_TOKENS.into()), ); update_balances( - &mut writelog, + write_log, Balance::new(TransferToEthereumKind::Nut, daewon_address()), SignedAmount::Positive(DAEWONS_GAS.into()), SignedAmount::Positive(DAES_NUTS.into()), ); // set up the initial balances of the bridge pool update_balances( - &mut writelog, + write_log, Balance::new(TransferToEthereumKind::Erc20, BRIDGE_POOL_ADDRESS), SignedAmount::Positive(ESCROWED_AMOUNT.into()), SignedAmount::Positive(ESCROWED_TOKENS.into()), ); update_balances( - &mut writelog, + write_log, Balance::new(TransferToEthereumKind::Nut, BRIDGE_POOL_ADDRESS), SignedAmount::Positive(ESCROWED_AMOUNT.into()), SignedAmount::Positive(ESCROWED_NUTS.into()), ); // set up the initial balances of the ethereum bridge account update_balances( - &mut writelog, + write_log, Balance::new(TransferToEthereumKind::Erc20, BRIDGE_ADDRESS), SignedAmount::Positive(ESCROWED_AMOUNT.into()), // we only care about escrowing NAM SignedAmount::Positive(0.into()), ); 
- writelog.commit_tx(); - writelog + write_log.commit_tx(); } /// Update gas and token balances of an address and @@ -899,7 +894,7 @@ mod test_bridge_pool_vp { } /// Initialize some dummy storage for testing - fn setup_storage() -> WlStorage { + fn setup_storage() -> TestState { // a dummy config for testing let config = EthereumBridgeParams { erc20_whitelist: vec![], @@ -913,41 +908,30 @@ mod test_bridge_pool_vp { }, }, }; - let mut wl_storage = WlStorage { - storage: State::::open( - std::path::Path::new(""), - ChainId::default(), - address::nam(), - None, - None, - namada_sdk::state::merklize_all_keys, - ), - write_log: Default::default(), - }; - config.init_storage(&mut wl_storage); - wl_storage.commit_block().expect("Test failed"); - wl_storage.write_log = new_writelog(); - wl_storage.commit_block().expect("Test failed"); - wl_storage + let mut state = TestState::default(); + config.init_storage(&mut state); + state.commit_block().expect("Test failed"); + new_write_log(state.write_log_mut()); + state.commit_block().expect("Test failed"); + state } /// Setup a ctx for running native vps fn setup_ctx<'a>( tx: &'a Tx, - storage: &'a State, - write_log: &'a WriteLog, + state: &'a TestState, + gas_meter: &'a RefCell, + sentinel: &'a RefCell, keys_changed: &'a BTreeSet, verifiers: &'a BTreeSet
, - ) -> Ctx<'a, MockDB, Sha256Hasher, WasmCacheRwAccess> { + ) -> Ctx<'a, TestState, WasmCacheRwAccess> { Ctx::new( &BRIDGE_POOL_ADDRESS, - storage, - write_log, + state, tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + gas_meter, + sentinel, keys_changed, verifiers, VpCache::new(temp_dir(), 100usize), @@ -973,7 +957,7 @@ mod test_bridge_pool_vp { F: FnOnce(&mut PendingTransfer, &mut WriteLog) -> BTreeSet, { // setup - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); let tx = Tx::from_type(TxType::Raw); // the transfer to be added to the pool @@ -993,11 +977,11 @@ mod test_bridge_pool_vp { }; // add transfer to pool let mut keys_changed = - insert_transfer(&mut transfer, &mut wl_storage.write_log); + insert_transfer(&mut transfer, state.write_log_mut()); // change Bertha's balances let mut new_keys_changed = update_balances( - &mut wl_storage.write_log, + state.write_log_mut(), Balance { asset: transfer.transfer.asset, kind: TransferToEthereumKind::Erc20, @@ -1012,7 +996,7 @@ mod test_bridge_pool_vp { // change the bridge pool balances let mut new_keys_changed = update_balances( - &mut wl_storage.write_log, + state.write_log_mut(), Balance { asset: transfer.transfer.asset, kind: TransferToEthereumKind::Erc20, @@ -1026,17 +1010,22 @@ mod test_bridge_pool_vp { keys_changed.append(&mut new_keys_changed); let verifiers = BTreeSet::default(); // create the data to be given to the vp + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), }; - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); let res = 
vp.validate_tx(&tx, &keys_changed, &verifiers); @@ -1322,7 +1311,7 @@ mod test_bridge_pool_vp { #[test] fn test_adding_transfer_twice_fails() { // setup - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); let tx = Tx::from_type(TxType::Raw); // the transfer to be added to the pool @@ -1330,8 +1319,8 @@ mod test_bridge_pool_vp { // add transfer to pool let mut keys_changed = { - wl_storage - .write_log + state + .write_log_mut() .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) @@ -1339,7 +1328,7 @@ mod test_bridge_pool_vp { // update Bertha's balances let mut new_keys_changed = update_balances( - &mut wl_storage.write_log, + state.write_log_mut(), Balance { asset: ASSET, kind: TransferToEthereumKind::Erc20, @@ -1354,7 +1343,7 @@ mod test_bridge_pool_vp { // update the bridge pool balances let mut new_keys_changed = update_balances( - &mut wl_storage.write_log, + state.write_log_mut(), Balance { asset: ASSET, kind: TransferToEthereumKind::Erc20, @@ -1369,17 +1358,22 @@ mod test_bridge_pool_vp { let verifiers = BTreeSet::default(); // create the data to be given to the vp + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), }; - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); let res = vp.validate_tx(&tx, &keys_changed, &verifiers); @@ -1391,7 +1385,7 @@ mod test_bridge_pool_vp { #[test] fn test_zero_gas_fees_rejected() { // setup - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); let tx = Tx::from_type(TxType::Raw); // the transfer to be added to the pool @@ -1412,8 +1406,8 
@@ mod test_bridge_pool_vp { // add transfer to pool let mut keys_changed = { - wl_storage - .write_log + state + .write_log_mut() .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) @@ -1430,17 +1424,22 @@ mod test_bridge_pool_vp { let verifiers = BTreeSet::default(); // create the data to be given to the vp + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), }; - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); let res = vp @@ -1454,7 +1453,7 @@ mod test_bridge_pool_vp { #[test] fn test_minting_wnam() { // setup - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); let eb_account_key = balance_key(&nam(), &Address::Internal(InternalAddress::EthBridge)); let tx = Tx::from_type(TxType::Raw); @@ -1477,8 +1476,8 @@ mod test_bridge_pool_vp { // add transfer to pool let mut keys_changed = { - wl_storage - .write_log + state + .write_log_mut() .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) @@ -1486,8 +1485,8 @@ mod test_bridge_pool_vp { // We escrow 100 Nam into the bridge pool VP // and 100 Nam in the Eth bridge VP let account_key = balance_key(&nam(), &bertha_address()); - wl_storage - .write_log + state + .write_log_mut() .write( &account_key, Amount::from(BERTHA_WEALTH - 200).serialize_to_vec(), @@ -1495,16 +1494,16 @@ mod test_bridge_pool_vp { .expect("Test failed"); assert!(keys_changed.insert(account_key)); let bp_account_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); - wl_storage - .write_log + state + .write_log_mut() 
.write( &bp_account_key, Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), ) .expect("Test failed"); assert!(keys_changed.insert(bp_account_key)); - wl_storage - .write_log + state + .write_log_mut() .write( &eb_account_key, Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), @@ -1514,17 +1513,22 @@ mod test_bridge_pool_vp { let verifiers = BTreeSet::default(); // create the data to be given to the vp + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), }; - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); let res = vp @@ -1539,7 +1543,7 @@ mod test_bridge_pool_vp { #[test] fn test_reject_mint_wnam() { // setup - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); let tx = Tx::from_type(TxType::Raw); let eb_account_key = balance_key(&nam(), &Address::Internal(InternalAddress::EthBridge)); @@ -1562,8 +1566,8 @@ mod test_bridge_pool_vp { // add transfer to pool let keys_changed = { - wl_storage - .write_log + state + .write_log_mut() .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) @@ -1571,39 +1575,44 @@ mod test_bridge_pool_vp { // We escrow 100 Nam into the bridge pool VP // and 100 Nam in the Eth bridge VP let account_key = balance_key(&nam(), &bertha_address()); - wl_storage - .write_log + state + .write_log_mut() .write( &account_key, Amount::from(BERTHA_WEALTH - 200).serialize_to_vec(), ) .expect("Test failed"); let bp_account_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); - wl_storage - .write_log + state + .write_log_mut() .write( &bp_account_key, Amount::from(ESCROWED_AMOUNT + 
100).serialize_to_vec(), ) .expect("Test failed"); - wl_storage - .write_log + state + .write_log_mut() .write(&eb_account_key, Amount::from(10).serialize_to_vec()) .expect("Test failed"); let verifiers = BTreeSet::default(); // create the data to be given to the vp + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), }; - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); let res = vp @@ -1618,20 +1627,20 @@ mod test_bridge_pool_vp { #[test] fn test_mint_wnam_separate_gas_payer() { // setup - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); // initialize the eth bridge balance to 0 let eb_account_key = balance_key(&nam(), &Address::Internal(InternalAddress::EthBridge)); - wl_storage + state .write(&eb_account_key, Amount::default()) .expect("Test failed"); // initialize the gas payers account let gas_payer_balance_key = balance_key(&nam(), &established_address_1()); - wl_storage + state .write(&gas_payer_balance_key, Amount::from(BERTHA_WEALTH)) .expect("Test failed"); - wl_storage.write_log.commit_tx(); + state.write_log_mut().commit_tx(); let tx = Tx::from_type(TxType::Raw); // the transfer to be added to the pool @@ -1652,8 +1661,8 @@ mod test_bridge_pool_vp { // add transfer to pool let keys_changed = { - wl_storage - .write_log + state + .write_log_mut() .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) @@ -1661,45 +1670,50 @@ mod test_bridge_pool_vp { // We escrow 100 Nam into the bridge pool VP // and 100 Nam in the Eth bridge VP let account_key = balance_key(&nam(), &bertha_address()); - 
wl_storage - .write_log + state + .write_log_mut() .write( &account_key, Amount::from(BERTHA_WEALTH - 100).serialize_to_vec(), ) .expect("Test failed"); - wl_storage - .write_log + state + .write_log_mut() .write( &gas_payer_balance_key, Amount::from(BERTHA_WEALTH - 100).serialize_to_vec(), ) .expect("Test failed"); let bp_account_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); - wl_storage - .write_log + state + .write_log_mut() .write( &bp_account_key, Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), ) .expect("Test failed"); - wl_storage - .write_log + state + .write_log_mut() .write(&eb_account_key, Amount::from(10).serialize_to_vec()) .expect("Test failed"); let verifiers = BTreeSet::default(); // create the data to be given to the vp + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), }; - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); let res = vp @@ -1711,7 +1725,7 @@ mod test_bridge_pool_vp { /// Auxiliary function to test NUT functionality. 
fn test_nut_aux(kind: TransferToEthereumKind, expect: Expect) { // setup - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); let tx = Tx::from_type(TxType::Raw); // the transfer to be added to the pool @@ -1732,8 +1746,8 @@ mod test_bridge_pool_vp { // add transfer to pool let mut keys_changed = { - wl_storage - .write_log + state + .write_log_mut() .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) @@ -1741,7 +1755,7 @@ mod test_bridge_pool_vp { // update Daewon's balances let mut new_keys_changed = update_balances( - &mut wl_storage.write_log, + state.write_log_mut(), Balance { kind, asset: ASSET, @@ -1756,7 +1770,7 @@ mod test_bridge_pool_vp { // change the bridge pool balances let mut new_keys_changed = update_balances( - &mut wl_storage.write_log, + state.write_log_mut(), Balance { kind, asset: ASSET, @@ -1771,11 +1785,16 @@ mod test_bridge_pool_vp { // create the data to be given to the vp let verifiers = BTreeSet::default(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs index 5acd2927f2..acabe77b2d 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs @@ -3,9 +3,9 @@ use std::collections::BTreeSet; use eyre::WrapErr; -use namada_core::types::address::{Address, InternalAddress}; -use namada_core::types::storage::Key; -use namada_state::StorageHasher; +use namada_core::address::{Address, InternalAddress}; +use namada_core::storage::Key; +use namada_state::StateRead; use 
namada_tx::Tx; use namada_vp_env::VpEnv; @@ -23,20 +23,18 @@ pub struct Error(#[from] eyre::Report); /// /// All this VP does is reject NUT transfers whose destination /// address is not the Bridge pool escrow address. -pub struct NonUsableTokens<'ctx, DB, H, CA> +pub struct NonUsableTokens<'ctx, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Context to interact with the host structures. - pub ctx: Ctx<'ctx, DB, H, CA>, + pub ctx: Ctx<'ctx, S, CA>, } -impl<'a, DB, H, CA> NativeVp for NonUsableTokens<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for NonUsableTokens<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -118,15 +116,17 @@ where #[cfg(test)] mod test_nuts { + use std::cell::RefCell; use std::env::temp_dir; use assert_matches::assert_matches; + use namada_core::address::testing::arb_non_internal_address; use namada_core::borsh::BorshSerializeExt; - use namada_core::types::address::testing::arb_non_internal_address; - use namada_core::types::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; - use namada_core::types::storage::TxIndex; + use namada_core::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; + use namada_core::storage::TxIndex; + use namada_core::validity_predicate::VpSentinel; use namada_ethereum_bridge::storage::wrapped_erc20s; - use namada_state::testing::TestWlStorage; + use namada_state::testing::TestState; use namada_state::StorageWrite; use namada_tx::data::TxType; use proptest::prelude::*; @@ -143,31 +143,35 @@ mod test_nuts { let src_balance_key = balance_key(&nut, &src); let dst_balance_key = balance_key(&nut, &dst); - let wl_storage = { - let mut wl = TestWlStorage::default(); + let state = { + let mut state = TestState::default(); // write initial balances - wl.write(&src_balance_key, 
Amount::from(200_u64)) + state + .write(&src_balance_key, Amount::from(200_u64)) .expect("Test failed"); - wl.write(&dst_balance_key, Amount::from(100_u64)) + state + .write(&dst_balance_key, Amount::from(100_u64)) .expect("Test failed"); - wl.commit_block().expect("Test failed"); + state.commit_block().expect("Test failed"); // write the updated balances - wl.write_log + state + .write_log_mut() .write( &src_balance_key, Amount::from(100_u64).serialize_to_vec(), ) .expect("Test failed"); - wl.write_log + state + .write_log_mut() .write( &dst_balance_key, Amount::from(200_u64).serialize_to_vec(), ) .expect("Test failed"); - wl + state }; let keys_changed = { @@ -183,15 +187,17 @@ mod test_nuts { }; let tx = Tx::from_type(TxType::Raw); - let ctx = Ctx::<_, _, WasmCacheRwAccess>::new( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); + let ctx = Ctx::<_, WasmCacheRwAccess>::new( &Address::Internal(InternalAddress::Nut(DAI_ERC20_ETH_ADDRESS)), - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, VpCache::new(temp_dir(), 100usize), diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs index b72fe7d13f..52f3012901 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs @@ -2,15 +2,14 @@ use std::collections::{BTreeSet, HashSet}; use eyre::{eyre, Result}; -use namada_core::types::address::Address; -use namada_core::types::hash::StorageHasher; -use namada_core::types::storage::Key; -use namada_ethereum_bridge; +use namada_core::address::Address; +use namada_core::storage::Key; use namada_ethereum_bridge::storage; use 
namada_ethereum_bridge::storage::escrow_key; use namada_tx::Tx; use crate::ledger::native_vp::{Ctx, NativeVp, StorageReader}; +use crate::state::StateRead; use crate::token::storage_key::{balance_key, is_balance_key}; use crate::token::Amount; use crate::vm::WasmCacheAccess; @@ -21,20 +20,18 @@ use crate::vm::WasmCacheAccess; pub struct Error(#[from] eyre::Error); /// Validity predicate for the Ethereum bridge -pub struct EthBridge<'ctx, DB, H, CA> +pub struct EthBridge<'ctx, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Context to interact with the host structures. - pub ctx: Ctx<'ctx, DB, H, CA>, + pub ctx: Ctx<'ctx, S, CA>, } -impl<'ctx, DB, H, CA> EthBridge<'ctx, DB, H, CA> +impl<'ctx, S, CA> EthBridge<'ctx, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// If the Ethereum bridge's escrow key was written to, we check @@ -45,7 +42,7 @@ where verifiers: &BTreeSet
, ) -> Result { let escrow_key = balance_key( - &self.ctx.storage.native_token, + &self.ctx.state.in_mem().native_token, &crate::ethereum_bridge::ADDRESS, ); @@ -85,10 +82,9 @@ where } } -impl<'a, DB, H, CA> NativeVp for EthBridge<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for EthBridge<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -115,8 +111,10 @@ where "Ethereum Bridge VP triggered", ); - if !validate_changed_keys(&self.ctx.storage.native_token, keys_changed)? - { + if !validate_changed_keys( + &self.ctx.state.in_mem().native_token, + keys_changed, + )? { return Ok(false); } @@ -165,32 +163,29 @@ fn validate_changed_keys( #[cfg(test)] mod tests { - use std::default::Default; + use std::cell::RefCell; use std::env::temp_dir; use namada_core::borsh::BorshSerializeExt; + use namada_core::validity_predicate::VpSentinel; use namada_gas::TxGasMeter; + use namada_state::testing::TestState; use namada_state::StorageWrite; use namada_tx::data::TxType; - use namada_tx::Tx; use rand::Rng; use super::*; + use crate::address::testing::{established_address_1, nam, wnam}; use crate::ethereum_bridge::storage::bridge_pool::BRIDGE_POOL_ADDRESS; use crate::ethereum_bridge::storage::parameters::{ Contracts, EthereumBridgeParams, UpgradeableContract, }; use crate::ethereum_bridge::storage::wrapped_erc20s; + use crate::ethereum_events; + use crate::ethereum_events::EthAddress; use crate::ledger::gas::VpGasMeter; - use crate::state::mockdb::MockDB; - use crate::state::write_log::WriteLog; - use crate::state::{Sha256Hasher, State, WlStorage}; + use crate::storage::TxIndex; use crate::token::storage_key::minted_balance_key; - use crate::types::address::testing::established_address_1; - use crate::types::address::{nam, wnam}; - use crate::types::ethereum_events; - use crate::types::ethereum_events::EthAddress; - use 
crate::types::storage::TxIndex; use crate::vm::wasm::VpCache; use crate::vm::WasmCacheRwAccess; @@ -210,15 +205,15 @@ mod tests { } /// Initialize some dummy storage for testing - fn setup_storage() -> WlStorage { - let mut wl_storage = WlStorage::::default(); + fn setup_storage() -> TestState { + let mut state = TestState::default(); // setup a user with a balance let balance_key = balance_key( &nam(), &Address::decode(ARBITRARY_OWNER_A_ADDRESS).expect("Test failed"), ); - wl_storage + state .write( &balance_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE), @@ -238,28 +233,27 @@ mod tests { }, }, }; - config.init_storage(&mut wl_storage); - wl_storage.commit_block().expect("Test failed"); - wl_storage + config.init_storage(&mut state); + state.commit_block().expect("Test failed"); + state } /// Setup a ctx for running native vps fn setup_ctx<'a>( tx: &'a Tx, - storage: &'a State, - write_log: &'a WriteLog, + state: &'a TestState, + gas_meter: &'a RefCell, + sentinel: &'a RefCell, keys_changed: &'a BTreeSet, verifiers: &'a BTreeSet
, - ) -> Ctx<'a, MockDB, Sha256Hasher, WasmCacheRwAccess> { + ) -> Ctx<'a, TestState, WasmCacheRwAccess> { Ctx::new( &crate::ethereum_bridge::ADDRESS, - storage, - write_log, + state, tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + gas_meter, + sentinel, keys_changed, verifiers, VpCache::new(temp_dir(), 100usize), @@ -354,14 +348,14 @@ mod tests { /// Test that escrowing Nam is accepted. #[test] fn test_escrow_nam_accepted() { - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); // debit the user's balance let account_key = balance_key( &nam(), &Address::decode(ARBITRARY_OWNER_A_ADDRESS).expect("Test failed"), ); - wl_storage - .write_log + state + .write_log_mut() .write( &account_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) @@ -371,8 +365,8 @@ mod tests { // credit the balance to the escrow let escrow_key = balance_key(&nam(), &crate::ethereum_bridge::ADDRESS); - wl_storage - .write_log + state + .write_log_mut() .write( &escrow_key, Amount::from( @@ -387,11 +381,16 @@ mod tests { // set up the VP let tx = Tx::from_type(TxType::Raw); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = EthBridge { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), @@ -404,14 +403,14 @@ mod tests { /// Test that escrowing must increase the balance #[test] fn test_escrowed_nam_must_increase() { - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); // debit the user's balance let account_key = balance_key( &nam(), &Address::decode(ARBITRARY_OWNER_A_ADDRESS).expect("Test failed"), ); - wl_storage - .write_log + state + .write_log_mut() .write( &account_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) @@ -421,8 +420,8 @@ mod 
tests { // do not credit the balance to the escrow let escrow_key = balance_key(&nam(), &crate::ethereum_bridge::ADDRESS); - wl_storage - .write_log + state + .write_log_mut() .write( &escrow_key, Amount::from(BRIDGE_POOL_ESCROW_INITIAL_BALANCE) @@ -435,11 +434,16 @@ mod tests { // set up the VP let tx = Tx::from_type(TxType::Raw); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = EthBridge { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), @@ -453,14 +457,14 @@ mod tests { /// be triggered if escrowing occurs. #[test] fn test_escrowing_must_trigger_bridge_pool_vp() { - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); // debit the user's balance let account_key = balance_key( &nam(), &Address::decode(ARBITRARY_OWNER_A_ADDRESS).expect("Test failed"), ); - wl_storage - .write_log + state + .write_log_mut() .write( &account_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) @@ -470,8 +474,8 @@ mod tests { // credit the balance to the escrow let escrow_key = balance_key(&nam(), &crate::ethereum_bridge::ADDRESS); - wl_storage - .write_log + state + .write_log_mut() .write( &escrow_key, Amount::from( @@ -486,11 +490,16 @@ mod tests { // set up the VP let tx = Tx::from_type(TxType::Raw); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = EthBridge { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), diff --git a/crates/namada/src/ledger/native_vp/ibc/context.rs b/crates/namada/src/ledger/native_vp/ibc/context.rs index 8581e766ce..e4cfd76dd7 100644 --- a/crates/namada/src/ledger/native_vp/ibc/context.rs 
+++ b/crates/namada/src/ledger/native_vp/ibc/context.rs @@ -3,21 +3,18 @@ use std::collections::{BTreeSet, HashMap, HashSet}; use borsh_ext::BorshSerializeExt; -use ledger_storage::ResultExt; -use namada_core::types::storage::Epochs; +use namada_core::storage::Epochs; use namada_ibc::{IbcCommonContext, IbcStorageContext}; -use namada_state::{StorageError, StorageRead, StorageWrite}; +use namada_state::{StateRead, StorageError, StorageRead, StorageWrite}; +use crate::address::{Address, InternalAddress}; +use crate::ibc::IbcEvent; use crate::ledger::ibc::storage::is_ibc_key; use crate::ledger::native_vp::CtxPreStorageRead; use crate::state::write_log::StorageModification; -use crate::state::{self as ledger_storage, StorageHasher}; +use crate::state::{PrefixIter, ResultExt}; +use crate::storage::{BlockHash, BlockHeight, Epoch, Header, Key, TxIndex}; use crate::token::{self as token, Amount, DenominatedAmount}; -use crate::types::address::{Address, InternalAddress}; -use crate::types::ibc::IbcEvent; -use crate::types::storage::{ - BlockHash, BlockHeight, Epoch, Header, Key, TxIndex, -}; use crate::vm::WasmCacheAccess; /// Result of a storage API call. 
@@ -25,28 +22,26 @@ pub type Result = std::result::Result; /// Pseudo execution environment context for ibc native vp #[derive(Debug)] -pub struct PseudoExecutionContext<'view, 'a, DB, H, CA> +pub struct PseudoExecutionContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Temporary store for pseudo execution store: HashMap, /// Context to read the previous value - ctx: CtxPreStorageRead<'view, 'a, DB, H, CA>, + ctx: CtxPreStorageRead<'view, 'a, S, CA>, /// IBC event pub event: BTreeSet, } -impl<'view, 'a, DB, H, CA> PseudoExecutionContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> PseudoExecutionContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Generate new pseudo execution context - pub fn new(ctx: CtxPreStorageRead<'view, 'a, DB, H, CA>) -> Self { + pub fn new(ctx: CtxPreStorageRead<'view, 'a, S, CA>) -> Self { Self { store: HashMap::new(), ctx, @@ -68,14 +63,12 @@ where } } -impl<'view, 'a, DB, H, CA> StorageRead - for PseudoExecutionContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> StorageRead for PseudoExecutionContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { - type PrefixIter<'iter> = ledger_storage::PrefixIter<'iter, DB> where Self: 'iter; + type PrefixIter<'iter> = PrefixIter<'iter, ::D> where Self: 'iter; fn read_bytes(&self, key: &Key) -> Result>> { match self.store.get(key) { @@ -148,11 +141,9 @@ where } } -impl<'view, 'a, DB, H, CA> StorageWrite - for PseudoExecutionContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> StorageWrite for PseudoExecutionContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> 
namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { fn write_bytes( @@ -175,11 +166,10 @@ where } } -impl<'view, 'a, DB, H, CA> IbcStorageContext - for PseudoExecutionContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> IbcStorageContext + for PseudoExecutionContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<()> { @@ -284,47 +274,42 @@ where } } -impl<'view, 'a, DB, H, CA> IbcCommonContext - for PseudoExecutionContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> IbcCommonContext + for PseudoExecutionContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { } /// Ibc native vp validation context #[derive(Debug)] -pub struct VpValidationContext<'view, 'a, DB, H, CA> +pub struct VpValidationContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Context to read the post value - ctx: CtxPreStorageRead<'view, 'a, DB, H, CA>, + ctx: CtxPreStorageRead<'view, 'a, S, CA>, } -impl<'view, 'a, DB, H, CA> VpValidationContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> VpValidationContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Generate a new ibc vp validation context - pub fn new(ctx: CtxPreStorageRead<'view, 'a, DB, H, CA>) -> Self { + pub fn new(ctx: CtxPreStorageRead<'view, 'a, S, CA>) -> Self { Self { ctx } } } -impl<'view, 'a, DB, H, CA> StorageRead - for VpValidationContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> 
StorageRead for VpValidationContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { - type PrefixIter<'iter> = ledger_storage::PrefixIter<'iter, DB> where Self: 'iter; + type PrefixIter<'iter> = PrefixIter<'iter, ::D> where Self: 'iter; fn read_bytes(&self, key: &Key) -> Result>> { self.ctx.read_bytes(key) @@ -381,11 +366,9 @@ where } } -impl<'view, 'a, DB, H, CA> StorageWrite - for VpValidationContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> StorageWrite for VpValidationContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { fn write_bytes( @@ -401,11 +384,10 @@ where } } -impl<'view, 'a, DB, H, CA> IbcStorageContext - for VpValidationContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> IbcStorageContext + for VpValidationContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { fn emit_ibc_event(&mut self, _event: IbcEvent) -> Result<()> { @@ -461,11 +443,10 @@ where } } -impl<'view, 'a, DB, H, CA> IbcCommonContext - for VpValidationContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> IbcCommonContext + for VpValidationContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { } diff --git a/crates/namada/src/ledger/native_vp/ibc/mod.rs b/crates/namada/src/ledger/native_vp/ibc/mod.rs index 7774c41cf1..e16f7af60f 100644 --- a/crates/namada/src/ledger/native_vp/ibc/mod.rs +++ b/crates/namada/src/ledger/native_vp/ibc/mod.rs @@ -8,15 +8,15 @@ use std::rc::Rc; use std::time::Duration; use context::{PseudoExecutionContext, VpValidationContext}; -use 
namada_core::types::address::Address; -use namada_core::types::storage::Key; +use namada_core::address::Address; +use namada_core::storage::Key; use namada_gas::{IBC_ACTION_EXECUTE_GAS, IBC_ACTION_VALIDATE_GAS}; use namada_ibc::{ Error as ActionError, IbcActions, TransferModule, ValidationParams, }; use namada_proof_of_stake::storage::read_pos_params; use namada_state::write_log::StorageModification; -use namada_state::StorageHasher; +use namada_state::StateRead; use namada_tx::Tx; use namada_vp_env::VpEnv; use thiserror::Error; @@ -48,20 +48,18 @@ pub enum Error { pub type VpResult = std::result::Result; /// IBC VP -pub struct Ibc<'a, DB, H, CA> +pub struct Ibc<'a, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Context to interact with the host structures. - pub ctx: Ctx<'a, DB, H, CA>, + pub ctx: Ctx<'a, S, CA>, } -impl<'a, DB, H, CA> NativeVp for Ibc<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for Ibc<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -88,10 +86,9 @@ where } } -impl<'a, DB, H, CA> Ibc<'a, DB, H, CA> +impl<'a, S, CA> Ibc<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { fn validate_state( @@ -130,7 +127,7 @@ where } // check the event - let actual = self.ctx.write_log.get_ibc_events(); + let actual = self.ctx.state.write_log().get_ibc_events(); if *actual != ctx.borrow().event { return Err(Error::IbcEvent(format!( "The IBC event is invalid: Actual {:?}, Expected {:?}", @@ -162,7 +159,8 @@ where pub fn validation_params(&self) -> VpResult { use std::str::FromStr; let chain_id = self.ctx.get_chain_id().map_err(Error::NativeVpError)?; - let proof_specs = 
namada_state::ics23_specs::ibc_proof_specs::(); + let proof_specs = + namada_state::ics23_specs::ibc_proof_specs::<::H>(); let pos_params = read_pos_params(&self.ctx.post()).map_err(Error::NativeVpError)?; let pipeline_len = pos_params.pipeline_len; @@ -247,12 +245,12 @@ impl From for Error { /// A dummy header used for testing #[cfg(any(test, feature = "testing"))] -pub fn get_dummy_header() -> crate::types::storage::Header { +pub fn get_dummy_header() -> crate::storage::Header { use crate::tendermint::time::Time as TmTime; - crate::types::storage::Header { - hash: crate::types::hash::Hash([0; 32]), + crate::storage::Header { + hash: crate::hash::Hash([0; 32]), time: TmTime::now().try_into().unwrap(), - next_validators_hash: crate::types::hash::Hash([0; 32]), + next_validators_hash: crate::hash::Hash([0; 32]), } } @@ -260,11 +258,11 @@ pub fn get_dummy_header() -> crate::types::storage::Header { #[cfg(any(test, feature = "testing"))] pub fn get_dummy_genesis_validator() -> namada_proof_of_stake::types::GenesisValidator { - use crate::core::types::address::testing::established_address_1; - use crate::core::types::dec::Dec; - use crate::core::types::key::testing::common_sk_from_simple_seed; + use crate::core::address::testing::established_address_1; + use crate::core::dec::Dec; + use crate::core::key::testing::common_sk_from_simple_seed; + use crate::key; use crate::token::Amount; - use crate::types::key; let address = established_address_1(); let tokens = Amount::native_whole(1); @@ -306,8 +304,6 @@ pub fn get_dummy_genesis_validator() #[cfg(test)] mod tests { - use core::time::Duration; - use std::convert::TryFrom; use std::str::FromStr; use borsh::BorshDeserialize; @@ -317,21 +313,22 @@ mod tests { }; use ibc_testkit::testapp::ibc::clients::mock::consensus_state::MockConsensusState; use ibc_testkit::testapp::ibc::clients::mock::header::MockHeader; + use namada_core::validity_predicate::VpSentinel; use namada_gas::TxGasMeter; use 
namada_governance::parameters::GovernanceParameters; - use namada_state::testing::TestWlStorage; + use namada_state::testing::TestState; use namada_state::StorageRead; use namada_tx::data::TxType; - use namada_tx::{Code, Data, Section, Signature, Tx}; + use namada_tx::{Code, Data, Section, Signature}; use prost::Message; use sha2::Digest; use super::*; - use crate::core::types::address::testing::{ - established_address_1, established_address_2, + use crate::core::address::testing::{ + established_address_1, established_address_2, nam, }; - use crate::core::types::address::{nam, InternalAddress}; - use crate::core::types::storage::Epoch; + use crate::core::address::InternalAddress; + use crate::core::storage::Epoch; use crate::ibc::apps::transfer::types::events::{ AckEvent, DenomTraceEvent, RecvEvent, TimeoutEvent, TransferEvent, }; @@ -391,25 +388,25 @@ mod tests { use crate::ibc::primitives::proto::{Any, Protobuf}; use crate::ibc::primitives::{Msg, Timestamp}; use crate::ibc::storage::{ - ack_key, calc_hash, channel_counter_key, channel_key, - client_connections_key, client_counter_key, client_state_key, - client_update_height_key, client_update_timestamp_key, commitment_key, - connection_counter_key, connection_key, consensus_state_key, - ibc_denom_key, next_sequence_ack_key, next_sequence_recv_key, - next_sequence_send_key, receipt_key, + ack_key, channel_counter_key, channel_key, client_connections_key, + client_counter_key, client_state_key, client_update_height_key, + client_update_timestamp_key, commitment_key, connection_counter_key, + connection_key, consensus_state_key, ibc_denom_key, + next_sequence_ack_key, next_sequence_recv_key, next_sequence_send_key, + receipt_key, }; + use crate::key::testing::keypair_1; use crate::ledger::gas::VpGasMeter; use crate::ledger::parameters::storage::{ get_epoch_duration_storage_key, get_max_expected_time_per_block_key, }; use crate::ledger::parameters::EpochDuration; use crate::ledger::{ibc, pos}; + use 
crate::storage::{BlockHash, BlockHeight, TxIndex}; use crate::tendermint::time::Time as TmTime; + use crate::time::DurationSecs; use crate::token::storage_key::balance_key; use crate::token::Amount; - use crate::types::key::testing::keypair_1; - use crate::types::storage::{BlockHash, BlockHeight, TxIndex}; - use crate::types::time::DurationSecs; use crate::vm::wasm; const ADDRESS: Address = Address::Internal(InternalAddress::Ibc); @@ -421,15 +418,15 @@ mod tests { ClientId::from_str(&id).expect("Creating a client ID failed") } - fn init_storage() -> TestWlStorage { - let mut wl_storage = TestWlStorage::default(); + fn init_storage() -> TestState { + let mut state = TestState::default(); // initialize the storage - ibc::init_genesis_storage(&mut wl_storage); + ibc::init_genesis_storage(&mut state); let gov_params = GovernanceParameters::default(); - gov_params.init_storage(&mut wl_storage).unwrap(); + gov_params.init_storage(&mut state).unwrap(); pos::test_utils::test_init_genesis( - &mut wl_storage, + &mut state, namada_proof_of_stake::OwnedPosParams::default(), vec![get_dummy_genesis_validator()].into_iter(), Epoch(1), @@ -441,31 +438,31 @@ mod tests { min_num_of_blocks: 10, min_duration: DurationSecs(100), }; - wl_storage - .write_log + state + .write_log_mut() .write(&epoch_duration_key, epoch_duration.serialize_to_vec()) .expect("write failed"); // max_expected_time_per_block let time = DurationSecs::from(Duration::new(60, 0)); let time_key = get_max_expected_time_per_block_key(); - wl_storage - .write_log - .write(&time_key, namada_core::types::encode(&time)) + state + .write_log_mut() + .write(&time_key, namada_core::encode(&time)) .expect("write failed"); // set a dummy header - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(1)) .unwrap(); - wl_storage + state } - fn 
insert_init_client(wl_storage: &mut TestWlStorage) { + fn insert_init_client(state: &mut TestState) { // insert a mock client type let client_id = get_client_id(); // insert a mock client state @@ -477,41 +474,39 @@ mod tests { }; let client_state = MockClientState::new(header); let bytes = Protobuf::::encode_vec(client_state); - wl_storage - .write_log + state + .write_log_mut() .write(&client_state_key, bytes) .expect("write failed"); // insert a mock consensus state let consensus_key = consensus_state_key(&client_id, height); let consensus_state = MockConsensusState::new(header); let bytes = Protobuf::::encode_vec(consensus_state); - wl_storage - .write_log + state + .write_log_mut() .write(&consensus_key, bytes) .expect("write failed"); // insert update time and height let client_update_time_key = client_update_timestamp_key(&client_id); - let time = wl_storage - .storage - .get_block_header(None) + let time = StateRead::get_block_header(state, None) .unwrap() .0 .unwrap() .time; let bytes = TmTime::try_from(time).unwrap().encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&client_update_time_key, bytes) .expect("write failed"); let client_update_height_key = client_update_height_key(&client_id); - let host_height = wl_storage.storage.get_block_height().0; + let host_height = state.in_mem().get_block_height().0; let host_height = Height::new(0, host_height.0).expect("invalid height"); - wl_storage - .write_log + state + .write_log_mut() .write(&client_update_height_key, host_height.encode_vec()) .expect("write failed"); - wl_storage.write_log.commit_tx(); + state.write_log_mut().commit_tx(); } fn get_connection_id() -> ConnectionId { @@ -567,8 +562,8 @@ mod tests { ChanCounterparty::new(counterpart_port_id, Some(counterpart_channel_id)) } - fn get_next_seq(wl_storage: &TestWlStorage, key: &Key) -> Sequence { - let (val, _) = wl_storage.storage.read(key).expect("read failed"); + fn get_next_seq(state: &TestState, key: &Key) -> Sequence { + 
let (val, _) = state.db_read(key).expect("read failed"); match val { Some(v) => { // IBC related data is encoded without borsh @@ -581,8 +576,8 @@ mod tests { } } - fn increment_sequence(wl_storage: &mut TestWlStorage, key: &Key) { - let count = match wl_storage.read_bytes(key).expect("read failed") { + fn increment_sequence(state: &mut TestState, key: &Key) { + let count = match state.read_bytes(key).expect("read failed") { Some(value) => { let count: [u8; 8] = value.try_into().expect("decoding a count failed"); @@ -590,21 +585,21 @@ mod tests { } None => 0, }; - wl_storage - .write_log + state + .write_log_mut() .write(key, (count + 1).to_be_bytes().to_vec()) .expect("write failed"); } - fn increment_counter(wl_storage: &mut TestWlStorage, key: &Key) { - let count = match wl_storage.read_bytes(key).expect("read failed") { + fn increment_counter(state: &mut TestState, key: &Key) { + let count = match state.read_bytes(key).expect("read failed") { Some(value) => { u64::try_from_slice(&value).expect("invalid counter value") } None => unreachable!("The counter should be initialized"), }; - wl_storage - .write_log + state + .write_log_mut() .write(key, (count + 1).serialize_to_vec()) .expect("write failed"); } @@ -659,7 +654,7 @@ mod tests { #[test] fn test_create_client() { - let mut wl_storage = init_storage(); + let mut state = init_storage(); let mut keys_changed = BTreeSet::new(); let height = Height::new(0, 1).unwrap(); @@ -679,22 +674,22 @@ mod tests { // client state let client_state_key = client_state_key(&get_client_id()); let bytes = Protobuf::::encode_vec(client_state); - wl_storage - .write_log + state + .write_log_mut() .write(&client_state_key, bytes) .expect("write failed"); keys_changed.insert(client_state_key); // client consensus let consensus_key = consensus_state_key(&client_id, height); let bytes = Protobuf::::encode_vec(consensus_state); - wl_storage - .write_log + state + .write_log_mut() .write(&consensus_key, bytes) .expect("write failed"); 
keys_changed.insert(consensus_key); // client counter let client_counter_key = client_counter_key(); - increment_counter(&mut wl_storage, &client_counter_key); + increment_counter(&mut state, &client_counter_key); keys_changed.insert(client_counter_key); let event = RawIbcEvent::CreateClient(CreateClient::new( @@ -703,26 +698,26 @@ mod tests { client_state.latest_height(), )); let message_event = RawIbcEvent::Message(MessageEvent::Client); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = wl_storage.storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( @@ -730,13 +725,14 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &outer_tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -752,19 +748,19 @@ mod tests { #[test] fn test_create_client_fail() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); // initialize the storage - ibc::init_genesis_storage(&mut 
wl_storage); + ibc::init_genesis_storage(&mut state); // set a dummy header - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(1)) .unwrap(); @@ -777,8 +773,8 @@ mod tests { let client_state = MockClientState::new(header); let client_state_key = client_state_key(&get_client_id()); let bytes = Protobuf::::encode_vec(client_state); - wl_storage - .write_log + state + .write_log_mut() .write(&client_state_key, bytes) .expect("write failed"); keys_changed.insert(client_state_key); @@ -796,25 +792,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -830,18 +827,18 @@ mod tests { #[test] fn test_update_client() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + let mut state = init_storage(); + insert_init_client(&mut state); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage 
+ state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -863,8 +860,8 @@ mod tests { // client state let client_state = MockClientState::new(header); let bytes = Protobuf::::encode_vec(client_state); - wl_storage - .write_log + state + .write_log_mut() .write(&client_state_key, bytes) .expect("write failed"); keys_changed.insert(client_state_key); @@ -872,33 +869,31 @@ mod tests { let consensus_key = consensus_state_key(&client_id, height); let consensus_state = MockConsensusState::new(header); let bytes = Protobuf::::encode_vec(consensus_state); - wl_storage - .write_log + state + .write_log_mut() .write(&consensus_key, bytes) .expect("write failed"); keys_changed.insert(consensus_key); // client update time let client_update_time_key = client_update_timestamp_key(&client_id); - let time = wl_storage - .storage - .get_block_header(None) + let time = StateRead::get_block_header(&state, None) .unwrap() .0 .unwrap() .time; let bytes = TmTime::try_from(time).unwrap().encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&client_update_time_key, bytes) .expect("write failed"); keys_changed.insert(client_update_time_key); // client update height let client_update_height_key = client_update_height_key(&client_id); - let host_height = wl_storage.storage.get_block_height().0; + let host_height = state.in_mem().get_block_height().0; let host_height = Height::new(0, host_height.0).expect("invalid height"); - wl_storage - .write_log + state + .write_log_mut() .write(&client_update_height_key, host_height.encode_vec()) .expect("write failed"); keys_changed.insert(client_update_height_key); @@ -912,11 +907,11 @@ mod tests { Protobuf::::encode_vec(header), )); let message_event = RawIbcEvent::Message(MessageEvent::Client); - wl_storage - .write_log + state + .write_log_mut() 
.emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -924,25 +919,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -958,17 +954,17 @@ mod tests { #[test] fn test_init_connection() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + let mut state = init_storage(); + insert_init_client(&mut state); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -995,8 +991,8 @@ mod tests { ) .expect("invalid connection"); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); keys_changed.insert(conn_key); @@ -1004,14 +1000,14 @@ 
mod tests { let client_conn_key = client_connections_key(&msg.client_id_on_a); let conn_list = conn_id.to_string(); let bytes = conn_list.serialize_to_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&client_conn_key, bytes) .expect("write failed"); keys_changed.insert(client_conn_key); // connection counter let conn_counter_key = connection_counter_key(); - increment_counter(&mut wl_storage, &conn_counter_key); + increment_counter(&mut state, &conn_counter_key); keys_changed.insert(conn_counter_key); // event let event = RawIbcEvent::OpenInitConnection(ConnOpenInit::new( @@ -1020,11 +1016,11 @@ mod tests { msg.counterparty.client_id().clone(), )); let message_event = RawIbcEvent::Message(MessageEvent::Connection); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -1032,7 +1028,7 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = wl_storage.storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( @@ -1040,20 +1036,21 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &outer_tx, &tx_index, - gas_meter, + &gas_meter, + 
&sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1068,18 +1065,18 @@ mod tests { #[test] fn test_init_connection_fail() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); // initialize the storage - ibc::init_genesis_storage(&mut wl_storage); + ibc::init_genesis_storage(&mut state); // set a dummy header - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(1)) .unwrap(); @@ -1106,8 +1103,8 @@ mod tests { ) .expect("invalid connection"); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); keys_changed.insert(conn_key); @@ -1115,14 +1112,14 @@ mod tests { let client_conn_key = client_connections_key(&msg.client_id_on_a); let conn_list = conn_id.to_string(); let bytes = conn_list.serialize_to_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&client_conn_key, bytes) .expect("write failed"); keys_changed.insert(client_conn_key); // connection counter let conn_counter_key = connection_counter_key(); - increment_counter(&mut wl_storage, &conn_counter_key); + increment_counter(&mut state, &conn_counter_key); keys_changed.insert(conn_counter_key); // No event @@ -1131,25 +1128,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = 
wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1164,17 +1162,17 @@ mod tests { #[test] fn test_try_connection() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + let mut state = init_storage(); + insert_init_client(&mut state); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -1215,8 +1213,8 @@ mod tests { ) .expect("invalid connection"); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); keys_changed.insert(conn_key); @@ -1224,14 +1222,14 @@ mod tests { let client_conn_key = client_connections_key(&msg.client_id_on_b); let conn_list = conn_id.to_string(); let bytes = conn_list.serialize_to_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&client_conn_key, bytes) .expect("write failed"); keys_changed.insert(client_conn_key); // connection counter let conn_counter_key = connection_counter_key(); - increment_counter(&mut wl_storage, &conn_counter_key); + increment_counter(&mut state, &conn_counter_key); keys_changed.insert(conn_counter_key); // event let event = RawIbcEvent::OpenTryConnection(ConnOpenTry::new( @@ -1241,36 +1239,37 @@ mod tests { msg.counterparty.client_id().clone(), )); let message_event = 
RawIbcEvent::Message(MessageEvent::Connection); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1286,34 +1285,34 @@ mod tests { #[test] fn test_ack_connection() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an Init connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Init); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - 
.storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); // update the connection to Open let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); keys_changed.insert(conn_key); @@ -1349,11 +1348,11 @@ mod tests { counterparty.client_id().clone(), )); let message_event = RawIbcEvent::Message(MessageEvent::Connection); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_code = vec![]; @@ -1361,7 +1360,7 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = wl_storage.storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( @@ -1369,20 +1368,21 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &outer_tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1397,34 +1397,34 @@ mod tests { #[test] fn test_confirm_connection() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + 
insert_init_client(&mut state); // insert a TryOpen connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::TryOpen); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); // update the connection to Open let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); keys_changed.insert(conn_key); @@ -1446,11 +1446,11 @@ mod tests { counterparty.client_id().clone(), )); let message_event = RawIbcEvent::Message(MessageEvent::Connection); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_code = vec![]; @@ -1458,7 +1458,7 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = wl_storage.storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( @@ -1466,20 +1466,21 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( 
&TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &outer_tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1494,27 +1495,27 @@ mod tests { #[test] fn test_init_channel() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an opened connection let conn_id = get_connection_id(); let conn_key = connection_key(&conn_id); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -1541,26 +1542,26 @@ mod tests { ) .unwrap(); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); keys_changed.insert(channel_key); // channel counter let chan_counter_key = channel_counter_key(); - increment_counter(&mut wl_storage, &chan_counter_key); + increment_counter(&mut state, &chan_counter_key); keys_changed.insert(chan_counter_key); // sequences let channel_id = get_channel_id(); let port_id = msg.port_id_on_a.clone(); let send_key = next_sequence_send_key(&port_id, &channel_id); - 
increment_sequence(&mut wl_storage, &send_key); + increment_sequence(&mut state, &send_key); keys_changed.insert(send_key); let recv_key = next_sequence_recv_key(&port_id, &channel_id); - increment_sequence(&mut wl_storage, &recv_key); + increment_sequence(&mut state, &recv_key); keys_changed.insert(recv_key); let ack_key = next_sequence_ack_key(&port_id, &channel_id); - increment_sequence(&mut wl_storage, &ack_key); + increment_sequence(&mut state, &ack_key); keys_changed.insert(ack_key); // event let event = RawIbcEvent::OpenInitChannel(ChanOpenInit::new( @@ -1571,11 +1572,11 @@ mod tests { msg.version_proposal.clone(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -1583,7 +1584,7 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = wl_storage.storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( @@ -1591,20 +1592,21 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &outer_tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, 
vp_wasm_cache, @@ -1619,26 +1621,26 @@ mod tests { #[test] fn test_try_channel() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -1664,26 +1666,26 @@ mod tests { let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::TryOpen, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); keys_changed.insert(channel_key); // channel counter let chan_counter_key = channel_counter_key(); - increment_counter(&mut wl_storage, &chan_counter_key); + increment_counter(&mut state, &chan_counter_key); keys_changed.insert(chan_counter_key); // sequences let channel_id = get_channel_id(); let port_id = msg.port_id_on_a.clone(); let send_key = next_sequence_send_key(&port_id, &channel_id); - increment_sequence(&mut wl_storage, &send_key); + increment_sequence(&mut state, &send_key); keys_changed.insert(send_key); let recv_key = next_sequence_recv_key(&port_id, &channel_id); - increment_sequence(&mut wl_storage, &recv_key); + increment_sequence(&mut state, &recv_key); keys_changed.insert(recv_key); let ack_key = 
next_sequence_ack_key(&port_id, &channel_id); - increment_sequence(&mut wl_storage, &ack_key); + increment_sequence(&mut state, &ack_key); keys_changed.insert(ack_key); // event let event = RawIbcEvent::OpenTryChannel(ChanOpenTry::new( @@ -1695,11 +1697,11 @@ mod tests { msg.version_supported_on_a.clone(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -1707,7 +1709,7 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = wl_storage.storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( @@ -1715,20 +1717,21 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &outer_tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1743,34 +1746,34 @@ mod tests { #[test] fn test_ack_channel() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = 
connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); // insert an Init channel let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::Init, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -1790,8 +1793,8 @@ mod tests { // update the channel to Open let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); keys_changed.insert(channel_key); @@ -1804,11 +1807,11 @@ mod tests { get_connection_id(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -1816,7 +1819,7 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = wl_storage.storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); 
outer_tx.add_section(Section::Signature(Signature::new( @@ -1824,20 +1827,21 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &outer_tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1852,34 +1856,34 @@ mod tests { #[test] fn test_confirm_channel() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); // insert a TryOpen channel let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::TryOpen, Order::Ordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -1896,8 +1900,8 @@ mod tests { // update the channel to Open 
let channel = get_channel(ChanState::Open, Order::Ordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); keys_changed.insert(channel_key); @@ -1911,11 +1915,11 @@ mod tests { get_connection_id(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -1923,25 +1927,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1959,42 +1964,42 @@ mod tests { #[test] fn test_send_packet() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); // insert an 
Open channel let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); // init balance let sender = established_address_1(); let balance_key = balance_key(&nam(), &sender); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -2017,9 +2022,9 @@ mod tests { // the sequence send let seq_key = next_sequence_send_key(&get_port_id(), &get_channel_id()); - let sequence = get_next_seq(&wl_storage, &seq_key); - wl_storage - .write_log + let sequence = get_next_seq(&state, &seq_key); + state + .write_log_mut() .write(&seq_key, (u64::from(sequence) + 1).to_be_bytes().to_vec()) .expect("write failed"); keys_changed.insert(seq_key); @@ -2030,8 +2035,8 @@ mod tests { commitment_key(&msg.port_id_on_a, &msg.chan_id_on_a, sequence); let commitment = commitment(&packet); let bytes = commitment.into_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&commitment_key, bytes) .expect("write failed"); keys_changed.insert(commitment_key); @@ -2044,8 +2049,8 @@ mod tests { memo: msg.packet_data.memo.clone(), }; let event = RawIbcEvent::Module(ModuleEvent::from(transfer_event)); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let event = RawIbcEvent::SendPacket(SendPacket::new( 
packet, @@ -2053,11 +2058,11 @@ mod tests { get_connection_id(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -2065,25 +2070,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -2098,34 +2104,34 @@ mod tests { #[test] fn test_recv_packet() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); // insert an open channel let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + 
.write_log_mut() .write(&channel_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -2168,8 +2174,8 @@ mod tests { msg.packet.seq_on_a, ); let bytes = [1_u8].to_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&receipt_key, bytes) .expect("write failed"); keys_changed.insert(receipt_key); @@ -2182,8 +2188,8 @@ mod tests { let transfer_ack = AcknowledgementStatus::success(ack_success_b64()); let acknowledgement: Acknowledgement = transfer_ack.into(); let bytes = sha2::Sha256::digest(acknowledgement.as_bytes()).to_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&ack_key, bytes) .expect("write failed"); keys_changed.insert(ack_key); @@ -2196,15 +2202,15 @@ mod tests { let trace_hash = calc_hash(coin.denom.to_string()); let denom_key = ibc_denom_key(receiver.to_string(), &trace_hash); let bytes = coin.denom.to_string().serialize_to_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&denom_key, bytes) .expect("write failed"); keys_changed.insert(denom_key); let denom_key = ibc_denom_key(nam().to_string(), &trace_hash); let bytes = coin.denom.to_string().serialize_to_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&denom_key, bytes) .expect("write failed"); keys_changed.insert(denom_key); @@ -2218,16 +2224,16 @@ mod tests { success: true, }; let event = RawIbcEvent::Module(ModuleEvent::from(recv_event)); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let denom_trace_event = DenomTraceEvent { trace_hash: Some(trace_hash), denom: 
coin.denom, }; let event = RawIbcEvent::Module(ModuleEvent::from(denom_trace_event)); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let event = RawIbcEvent::ReceivePacket(ReceivePacket::new( msg.packet.clone(), @@ -2235,11 +2241,11 @@ mod tests { get_connection_id(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let event = RawIbcEvent::WriteAcknowledgement(WriteAcknowledgement::new( @@ -2248,11 +2254,11 @@ mod tests { get_connection_id(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -2260,25 +2266,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -2293,23 +2300,23 @@ mod tests { #[test] fn test_ack_packet() { let mut keys_changed = 
BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); // insert an Open channel let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); // commitment @@ -2342,19 +2349,19 @@ mod tests { ); let commitment = commitment(&packet); let bytes = commitment.into_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&commitment_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -2369,8 +2376,8 @@ mod tests { }; // delete the commitment - wl_storage - .write_log + state + .write_log_mut() .delete(&commitment_key) .expect("delete failed"); keys_changed.insert(commitment_key); @@ -2386,8 +2393,8 @@ mod tests { acknowledgement: transfer_ack, }; let event = RawIbcEvent::Module(ModuleEvent::from(ack_event)); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let event = RawIbcEvent::AcknowledgePacket(AcknowledgePacket::new( packet, @@ -2395,11 +2402,11 @@ mod tests { get_connection_id(), )); let message_event = 
RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -2407,25 +2414,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -2440,31 +2448,31 @@ mod tests { #[test] fn test_timeout_packet() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); // insert an Open channel let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); // init the escrow balance let 
balance_key = balance_key(&nam(), &Address::Internal(InternalAddress::Ibc)); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); // commitment @@ -2497,19 +2505,19 @@ mod tests { ); let commitment = commitment(&packet); let bytes = commitment.into_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&commitment_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -2523,8 +2531,8 @@ mod tests { }; // delete the commitment - wl_storage - .write_log + state + .write_log_mut() .delete(&commitment_key) .expect("delete failed"); keys_changed.insert(commitment_key); @@ -2538,19 +2546,19 @@ mod tests { memo: data.memo, }; let event = RawIbcEvent::Module(ModuleEvent::from(timeout_event)); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let event = RawIbcEvent::TimeoutPacket(TimeoutPacket::new( packet, Order::Unordered, )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -2558,25 +2566,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, 
None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -2591,31 +2600,31 @@ mod tests { #[test] fn test_timeout_on_close_packet() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); // insert an Open channel let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); // init the escrow balance let balance_key = balance_key(&nam(), &Address::Internal(InternalAddress::Ibc)); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); // commitment @@ -2648,19 +2657,19 @@ mod tests { ); let commitment = commitment(&packet); let bytes = commitment.into_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&commitment_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - 
wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -2675,8 +2684,8 @@ mod tests { }; // delete the commitment - wl_storage - .write_log + state + .write_log_mut() .delete(&commitment_key) .expect("delete failed"); keys_changed.insert(commitment_key); @@ -2690,19 +2699,19 @@ mod tests { memo: data.memo, }; let event = RawIbcEvent::Module(ModuleEvent::from(timeout_event)); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let event = RawIbcEvent::TimeoutPacket(TimeoutPacket::new( packet, Order::Unordered, )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -2710,25 +2719,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, 
&tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, diff --git a/crates/namada/src/ledger/native_vp/masp.rs b/crates/namada/src/ledger/native_vp/masp.rs index 7c3f3dee5c..874bb6a464 100644 --- a/crates/namada/src/ledger/native_vp/masp.rs +++ b/crates/namada/src/ledger/native_vp/masp.rs @@ -9,13 +9,13 @@ use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; use masp_primitives::transaction::components::I128Sum; use masp_primitives::transaction::Transaction; -use namada_core::types::address::Address; -use namada_core::types::address::InternalAddress::Masp; -use namada_core::types::masp::encode_asset_type; -use namada_core::types::storage::{IndexedTx, Key}; +use namada_core::address::Address; +use namada_core::address::InternalAddress::Masp; +use namada_core::masp::encode_asset_type; +use namada_core::storage::{IndexedTx, Key}; use namada_gas::MASP_VERIFY_SHIELDED_TX_GAS; use namada_sdk::masp::verify_shielded_tx; -use namada_state::{OptionExt, ResultExt}; +use namada_state::{OptionExt, ResultExt, StateRead}; use namada_token::read_denom; use namada_tx::Tx; use namada_vp_env::VpEnv; @@ -34,7 +34,7 @@ use token::Amount; use crate::ledger::native_vp; use crate::ledger::native_vp::{Ctx, NativeVp}; use crate::token; -use crate::types::token::MaspDigitPos; +use crate::token::MaspDigitPos; use crate::vm::WasmCacheAccess; #[allow(missing_docs)] @@ -48,14 +48,13 @@ pub enum Error { pub type Result = std::result::Result; /// MASP VP -pub struct MaspVp<'a, DB, H, CA> +pub struct MaspVp<'a, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: namada_state::StorageHasher, + S: StateRead, CA: WasmCacheAccess, { /// Context to interact with the host structures. 
- pub ctx: Ctx<'a, DB, H, CA>, + pub ctx: Ctx<'a, S, CA>, } struct TransparentTransferData { @@ -65,10 +64,9 @@ struct TransparentTransferData { amount: Amount, } -impl<'a, DB, H, CA> MaspVp<'a, DB, H, CA> +impl<'a, S, CA> MaspVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { // Check that the transaction correctly revealed the nullifiers @@ -217,7 +215,7 @@ where let anchor_key = masp_convert_anchor_key(); let expected_anchor = self .ctx - .read_pre::(&anchor_key)? + .read_pre::(&anchor_key)? .ok_or(Error::NativeVpError( native_vp::Error::SimpleMessage("Cannot read storage"), ))?; @@ -225,9 +223,8 @@ where for description in &bundle.shielded_converts { // Check if the provided anchor matches the current // conversion tree's one - if namada_core::types::hash::Hash( - description.anchor.to_bytes(), - ) != expected_anchor + if namada_core::hash::Hash(description.anchor.to_bytes()) + != expected_anchor { tracing::debug!( "Convert description refers to an invalid anchor" @@ -398,10 +395,9 @@ fn unepoched_tokens( Ok(unepoched_tokens) } -impl<'a, DB, H, CA> NativeVp for MaspVp<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for MaspVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -413,7 +409,7 @@ where _verifiers: &BTreeSet
, ) -> Result { let epoch = self.ctx.get_block_epoch()?; - let conversion_state = self.ctx.storage.get_conversion_state(); + let conversion_state = self.ctx.state.in_mem().get_conversion_state(); let shielded_tx = self.ctx.get_shielded_action(tx_data)?; if u64::from(self.ctx.get_block_height()?) diff --git a/crates/namada/src/ledger/native_vp/mod.rs b/crates/namada/src/ledger/native_vp/mod.rs index 6692a514b8..1a6dc2e517 100644 --- a/crates/namada/src/ledger/native_vp/mod.rs +++ b/crates/namada/src/ledger/native_vp/mod.rs @@ -9,27 +9,26 @@ pub mod parameters; use std::cell::RefCell; use std::collections::BTreeSet; +use std::fmt::Debug; use borsh::BorshDeserialize; use eyre::WrapErr; -use namada_core::types::storage; -use namada_core::types::storage::Epochs; -use namada_core::types::validity_predicate::VpSentinel; +use namada_core::storage; +use namada_core::storage::Epochs; +use namada_core::validity_predicate::VpSentinel; use namada_gas::GasMetering; use namada_tx::Tx; pub use namada_vp_env::VpEnv; +use state::StateRead; use super::vp_host_fns; +use crate::address::Address; +use crate::hash::Hash; +use crate::ibc::IbcEvent; use crate::ledger::gas::VpGasMeter; use crate::state; -use crate::state::write_log::WriteLog; -use crate::state::{ResultExt, State, StorageHasher, StorageRead}; -use crate::types::address::Address; -use crate::types::hash::Hash; -use crate::types::ibc::IbcEvent; -use crate::types::storage::{ - BlockHash, BlockHeight, Epoch, Header, Key, TxIndex, -}; +use crate::state::{ResultExt, StorageRead}; +use crate::storage::{BlockHash, BlockHeight, Epoch, Header, Key, TxIndex}; use crate::vm::prefix_iter::PrefixIterators; use crate::vm::WasmCacheAccess; @@ -58,24 +57,21 @@ pub trait NativeVp { /// wrapper types and `eval_runner` field. The references must not be changed /// when [`Ctx`] is mutable. 
#[derive(Debug)] -pub struct Ctx<'a, DB, H, CA> +pub struct Ctx<'a, S, CA> where - DB: state::DB + for<'iter> state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: WasmCacheAccess, { /// The address of the account that owns the VP pub address: &'a Address, /// Storage prefix iterators. - pub iterators: RefCell>, + pub iterators: RefCell::D>>, /// VP gas meter. - pub gas_meter: RefCell, + pub gas_meter: &'a RefCell, /// Errors sentinel - pub sentinel: RefCell, - /// Read-only access to the storage. - pub storage: &'a State, - /// Read-only access to the write log. - pub write_log: &'a WriteLog, + pub sentinel: &'a RefCell, + /// Read-only state access. + pub state: &'a S, /// The transaction code is used for signature verification pub tx: &'a Tx, /// The transaction index is used to obtain the shielded transaction's @@ -97,42 +93,39 @@ where /// Read access to the prior storage (state before tx execution) via /// [`trait@StorageRead`]. #[derive(Debug)] -pub struct CtxPreStorageRead<'view, 'a: 'view, DB, H, CA> +pub struct CtxPreStorageRead<'view, 'a: 'view, S, CA> where - DB: state::DB + for<'iter> state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: WasmCacheAccess, { - ctx: &'view Ctx<'a, DB, H, CA>, + ctx: &'view Ctx<'a, S, CA>, } /// Read access to the posterior storage (state after tx execution) via /// [`trait@StorageRead`]. 
#[derive(Debug)] -pub struct CtxPostStorageRead<'view, 'a: 'view, DB, H, CA> +pub struct CtxPostStorageRead<'view, 'a: 'view, S, CA> where - DB: state::DB + for<'iter> state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: WasmCacheAccess, { - ctx: &'view Ctx<'a, DB, H, CA>, + ctx: &'view Ctx<'a, S, CA>, } -impl<'a, DB, H, CA> Ctx<'a, DB, H, CA> +impl<'a, S, CA> Ctx<'a, S, CA> where - DB: 'static + state::DB + for<'iter> state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Initialize a new context for native VP call #[allow(clippy::too_many_arguments)] pub fn new( address: &'a Address, - storage: &'a State, - write_log: &'a WriteLog, + state: &'a S, tx: &'a Tx, tx_index: &'a TxIndex, - gas_meter: VpGasMeter, + gas_meter: &'a RefCell, + sentinel: &'a RefCell, keys_changed: &'a BTreeSet, verifiers: &'a BTreeSet
, #[cfg(feature = "wasm-runtime")] @@ -140,11 +133,10 @@ where ) -> Self { Self { address, + state, iterators: RefCell::new(PrefixIterators::default()), - gas_meter: RefCell::new(gas_meter), - sentinel: RefCell::new(VpSentinel::default()), - storage, - write_log, + gas_meter, + sentinel, tx, tx_index, keys_changed, @@ -158,49 +150,44 @@ where /// Read access to the prior storage (state before tx execution) /// via [`trait@StorageRead`]. - pub fn pre<'view>(&'view self) -> CtxPreStorageRead<'view, 'a, DB, H, CA> { + pub fn pre<'view>(&'view self) -> CtxPreStorageRead<'view, 'a, S, CA> { CtxPreStorageRead { ctx: self } } /// Read access to the posterior storage (state after tx execution) /// via [`trait@StorageRead`]. - pub fn post<'view>( - &'view self, - ) -> CtxPostStorageRead<'view, 'a, DB, H, CA> { + pub fn post<'view>(&'view self) -> CtxPostStorageRead<'view, 'a, S, CA> { CtxPostStorageRead { ctx: self } } } -impl<'view, 'a: 'view, DB, H, CA> StorageRead - for CtxPreStorageRead<'view, 'a, DB, H, CA> +impl<'view, 'a: 'view, S, CA> StorageRead + for CtxPreStorageRead<'view, 'a, S, CA> where - DB: 'static + state::DB + for<'iter> state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { - type PrefixIter<'iter> = state::PrefixIter<'iter, DB> where Self: 'iter; + type PrefixIter<'iter> = state::PrefixIter<'iter,:: D> where Self: 'iter; fn read_bytes( &self, key: &storage::Key, ) -> Result>, state::StorageError> { vp_host_fns::read_pre( - &mut self.ctx.gas_meter.borrow_mut(), - self.ctx.storage, - self.ctx.write_log, + self.ctx.gas_meter, + self.ctx.state, key, - &mut self.ctx.sentinel.borrow_mut(), + self.ctx.sentinel, ) .into_storage_result() } fn has_key(&self, key: &storage::Key) -> Result { vp_host_fns::has_key_pre( - &mut self.ctx.gas_meter.borrow_mut(), - self.ctx.storage, - self.ctx.write_log, + self.ctx.gas_meter, + self.ctx.state, key, - &mut self.ctx.sentinel.borrow_mut(), + self.ctx.sentinel, ) 
.into_storage_result() } @@ -210,11 +197,11 @@ where prefix: &storage::Key, ) -> Result, state::StorageError> { vp_host_fns::iter_prefix_pre( - &mut self.ctx.gas_meter.borrow_mut(), - self.ctx.write_log, - self.ctx.storage, + self.ctx.gas_meter, + self.ctx.state.write_log(), + self.ctx.state.db(), prefix, - &mut self.ctx.sentinel.borrow_mut(), + self.ctx.sentinel, ) .into_storage_result() } @@ -226,10 +213,10 @@ where &'iter self, iter: &mut Self::PrefixIter<'iter>, ) -> Result)>, state::StorageError> { - vp_host_fns::iter_next::( - &mut self.ctx.gas_meter.borrow_mut(), + vp_host_fns::iter_next::<::D>( + self.ctx.gas_meter, iter, - &mut self.ctx.sentinel.borrow_mut(), + self.ctx.sentinel, ) .into_storage_result() } @@ -270,36 +257,33 @@ where } } -impl<'view, 'a: 'view, DB, H, CA> StorageRead - for CtxPostStorageRead<'view, 'a, DB, H, CA> +impl<'view, 'a: 'view, S, CA> StorageRead + for CtxPostStorageRead<'view, 'a, S, CA> where - DB: 'static + state::DB + for<'iter> state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { - type PrefixIter<'iter> = state::PrefixIter<'iter, DB> where Self: 'iter; + type PrefixIter<'iter> = state::PrefixIter<'iter, ::D> where Self: 'iter; fn read_bytes( &self, key: &storage::Key, ) -> Result>, state::StorageError> { vp_host_fns::read_post( - &mut self.ctx.gas_meter.borrow_mut(), - self.ctx.storage, - self.ctx.write_log, + self.ctx.gas_meter, + self.ctx.state, key, - &mut self.ctx.sentinel.borrow_mut(), + self.ctx.sentinel, ) .into_storage_result() } fn has_key(&self, key: &storage::Key) -> Result { vp_host_fns::has_key_post( - &mut self.ctx.gas_meter.borrow_mut(), - self.ctx.storage, - self.ctx.write_log, + self.ctx.gas_meter, + self.ctx.state, key, - &mut self.ctx.sentinel.borrow_mut(), + self.ctx.sentinel, ) .into_storage_result() } @@ -309,11 +293,11 @@ where prefix: &storage::Key, ) -> Result, state::StorageError> { vp_host_fns::iter_prefix_post( - &mut self.ctx.gas_meter.borrow_mut(), 
- self.ctx.write_log, - self.ctx.storage, + self.ctx.gas_meter, + self.ctx.state.write_log(), + self.ctx.state.db(), prefix, - &mut self.ctx.sentinel.borrow_mut(), + self.ctx.sentinel, ) .into_storage_result() } @@ -325,10 +309,10 @@ where &'iter self, iter: &mut Self::PrefixIter<'iter>, ) -> Result)>, state::StorageError> { - vp_host_fns::iter_next::( - &mut self.ctx.gas_meter.borrow_mut(), + vp_host_fns::iter_next::<::D>( + self.ctx.gas_meter, iter, - &mut self.ctx.sentinel.borrow_mut(), + self.ctx.sentinel, ) .into_storage_result() } @@ -361,7 +345,7 @@ where } fn get_native_token(&self) -> Result { - Ok(self.ctx.storage.native_token.clone()) + Ok(self.ctx.state.in_mem().native_token.clone()) } fn get_pred_epochs(&self) -> state::StorageResult { @@ -369,15 +353,14 @@ where } } -impl<'view, 'a: 'view, DB, H, CA> VpEnv<'view> for Ctx<'a, DB, H, CA> +impl<'view, 'a: 'view, S, CA> VpEnv<'view> for Ctx<'a, S, CA> where - DB: 'static + state::DB + for<'iter> state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { - type Post = CtxPostStorageRead<'view, 'a, DB, H, CA>; - type Pre = CtxPreStorageRead<'view, 'a, DB, H, CA>; - type PrefixIter<'iter> = state::PrefixIter<'iter, DB> where Self: 'iter; + type Post = CtxPostStorageRead<'view, 'a, S, CA>; + type Pre = CtxPreStorageRead<'view, 'a, S, CA>; + type PrefixIter<'iter> = state::PrefixIter<'iter, ::D> where Self: 'iter; fn pre(&'view self) -> Self::Pre { CtxPreStorageRead { ctx: self } @@ -391,45 +374,27 @@ where &self, key: &Key, ) -> Result, state::StorageError> { - vp_host_fns::read_temp( - &mut self.gas_meter.borrow_mut(), - self.write_log, - key, - &mut self.sentinel.borrow_mut(), - ) - .map(|data| data.and_then(|t| T::try_from_slice(&t[..]).ok())) - .into_storage_result() + vp_host_fns::read_temp(self.gas_meter, self.state, key, self.sentinel) + .map(|data| data.and_then(|t| T::try_from_slice(&t[..]).ok())) + .into_storage_result() } fn read_bytes_temp( &self, key: 
&Key, ) -> Result>, state::StorageError> { - vp_host_fns::read_temp( - &mut self.gas_meter.borrow_mut(), - self.write_log, - key, - &mut self.sentinel.borrow_mut(), - ) - .into_storage_result() + vp_host_fns::read_temp(self.gas_meter, self.state, key, self.sentinel) + .into_storage_result() } fn get_chain_id(&self) -> Result { - vp_host_fns::get_chain_id( - &mut self.gas_meter.borrow_mut(), - self.storage, - &mut self.sentinel.borrow_mut(), - ) - .into_storage_result() + vp_host_fns::get_chain_id(self.gas_meter, self.state, self.sentinel) + .into_storage_result() } fn get_block_height(&self) -> Result { - vp_host_fns::get_block_height( - &mut self.gas_meter.borrow_mut(), - self.storage, - &mut self.sentinel.borrow_mut(), - ) - .into_storage_result() + vp_host_fns::get_block_height(self.gas_meter, self.state, self.sentinel) + .into_storage_result() } fn get_block_header( @@ -437,69 +402,45 @@ where height: BlockHeight, ) -> Result, state::StorageError> { vp_host_fns::get_block_header( - &mut self.gas_meter.borrow_mut(), - self.storage, + self.gas_meter, + self.state, height, - &mut self.sentinel.borrow_mut(), + self.sentinel, ) .into_storage_result() } fn get_block_hash(&self) -> Result { - vp_host_fns::get_block_hash( - &mut self.gas_meter.borrow_mut(), - self.storage, - &mut self.sentinel.borrow_mut(), - ) - .into_storage_result() + vp_host_fns::get_block_hash(self.gas_meter, self.state, self.sentinel) + .into_storage_result() } fn get_block_epoch(&self) -> Result { - vp_host_fns::get_block_epoch( - &mut self.gas_meter.borrow_mut(), - self.storage, - &mut self.sentinel.borrow_mut(), - ) - .into_storage_result() + vp_host_fns::get_block_epoch(self.gas_meter, self.state, self.sentinel) + .into_storage_result() } fn get_tx_index(&self) -> Result { - vp_host_fns::get_tx_index( - &mut self.gas_meter.borrow_mut(), - self.tx_index, - &mut self.sentinel.borrow_mut(), - ) - .into_storage_result() + vp_host_fns::get_tx_index(self.gas_meter, self.tx_index, self.sentinel) + 
.into_storage_result() } fn get_native_token(&self) -> Result { - vp_host_fns::get_native_token( - &mut self.gas_meter.borrow_mut(), - self.storage, - &mut self.sentinel.borrow_mut(), - ) - .into_storage_result() + vp_host_fns::get_native_token(self.gas_meter, self.state, self.sentinel) + .into_storage_result() } fn get_pred_epochs(&self) -> state::StorageResult { - vp_host_fns::get_pred_epochs( - &mut self.gas_meter.borrow_mut(), - self.storage, - &mut self.sentinel.borrow_mut(), - ) - .into_storage_result() + vp_host_fns::get_pred_epochs(self.gas_meter, self.state, self.sentinel) + .into_storage_result() } fn get_ibc_events( &self, event_type: String, ) -> Result, state::StorageError> { - vp_host_fns::get_ibc_events( - &mut self.gas_meter.borrow_mut(), - self.write_log, - event_type, - ) - .into_storage_result() + vp_host_fns::get_ibc_events(self.gas_meter, self.state, event_type) + .into_storage_result() } fn iter_prefix<'iter>( @@ -507,11 +448,11 @@ where prefix: &Key, ) -> Result, state::StorageError> { vp_host_fns::iter_prefix_pre( - &mut self.gas_meter.borrow_mut(), - self.write_log, - self.storage, + self.gas_meter, + self.state.write_log(), + self.state.db(), prefix, - &mut self.sentinel.borrow_mut(), + self.sentinel, ) .into_storage_result() } @@ -528,22 +469,24 @@ where use crate::vm::host_env::VpCtx; use crate::vm::wasm::run::VpEvalWasm; - let eval_runner = VpEvalWasm { - db: PhantomData, - hasher: PhantomData, - cache_access: PhantomData, - }; - let mut iterators: PrefixIterators<'_, DB> = + let eval_runner = + VpEvalWasm::<::D, ::H, CA> { + db: PhantomData, + hasher: PhantomData, + cache_access: PhantomData, + }; + let mut iterators: PrefixIterators<'_, ::D> = PrefixIterators::default(); let mut result_buffer: Option> = None; let mut vp_wasm_cache = self.vp_wasm_cache.clone(); let ctx = VpCtx::new( self.address, - self.storage, - self.write_log, - &mut self.gas_meter.borrow_mut(), - &mut self.sentinel.borrow_mut(), + self.state.write_log(), + 
self.state.in_mem(), + self.state.db(), + self.gas_meter, + self.sentinel, self.tx, self.tx_index, &mut iterators, @@ -584,12 +527,8 @@ where } fn get_tx_code_hash(&self) -> Result, state::StorageError> { - vp_host_fns::get_tx_code_hash( - &mut self.gas_meter.borrow_mut(), - self.tx, - &mut self.sentinel.borrow_mut(), - ) - .into_storage_result() + vp_host_fns::get_tx_code_hash(self.gas_meter, self.tx, self.sentinel) + .into_storage_result() } fn read_pre( @@ -661,10 +600,9 @@ pub trait StorageReader { ) -> eyre::Result>; } -impl<'a, DB, H, CA> StorageReader for &Ctx<'a, DB, H, CA> +impl<'a, S, CA> StorageReader for &Ctx<'a, S, CA> where - DB: 'static + state::DB + for<'iter> state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Helper function. After reading posterior state, @@ -694,8 +632,6 @@ where pub(super) mod testing { use std::collections::HashMap; - use borsh::BorshDeserialize; - use super::*; #[derive(Debug, Default)] diff --git a/crates/namada/src/ledger/native_vp/multitoken.rs b/crates/namada/src/ledger/native_vp/multitoken.rs index 8999855ece..1286fa1a67 100644 --- a/crates/namada/src/ledger/native_vp/multitoken.rs +++ b/crates/namada/src/ledger/native_vp/multitoken.rs @@ -3,19 +3,20 @@ use std::collections::{BTreeSet, HashMap}; use namada_governance::is_proposal_accepted; +use namada_state::StateRead; use namada_token::storage_key::is_any_token_parameter_key; use namada_tx::Tx; use namada_vp_env::VpEnv; use thiserror::Error; +use crate::address::{Address, InternalAddress}; use crate::ledger::native_vp::{self, Ctx, NativeVp}; +use crate::storage::{Key, KeySeg}; use crate::token::storage_key::{ is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, minter_key, }; use crate::token::Amount; -use crate::types::address::{Address, InternalAddress}; -use crate::types::storage::{Key, KeySeg}; use crate::vm::WasmCacheAccess; #[allow(missing_docs)] @@ -29,20 +30,18 @@ pub enum Error { pub type 
Result = std::result::Result; /// Multitoken VP -pub struct MultitokenVp<'a, DB, H, CA> +pub struct MultitokenVp<'a, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: namada_state::StorageHasher, + S: StateRead, CA: WasmCacheAccess, { /// Context to interact with the host structures. - pub ctx: Ctx<'a, DB, H, CA>, + pub ctx: Ctx<'a, S, CA>, } -impl<'a, DB, H, CA> NativeVp for MultitokenVp<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for MultitokenVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -128,7 +127,7 @@ where } } else if is_any_token_parameter_key(key).is_some() { return self.is_valid_parameter(tx_data); - } else if key.segments.get(0) + } else if key.segments.first() == Some( &Address::Internal(InternalAddress::Multitoken).to_db_key(), ) @@ -168,10 +167,9 @@ where } } -impl<'a, DB, H, CA> MultitokenVp<'a, DB, H, CA> +impl<'a, S, CA> MultitokenVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Return the minter if the minter is valid and the minter VP exists @@ -214,37 +212,33 @@ where #[cfg(test)] mod tests { - use std::collections::BTreeSet; + use std::cell::RefCell; use borsh_ext::BorshSerializeExt; + use namada_core::validity_predicate::VpSentinel; use namada_gas::TxGasMeter; - use namada_state::testing::TestWlStorage; + use namada_state::testing::TestState; use namada_tx::data::TxType; - use namada_tx::{Code, Data, Section, Signature, Tx}; + use namada_tx::{Code, Data, Section, Signature}; use super::*; - use crate::core::types::address::nam; - use crate::core::types::address::testing::{ - established_address_1, established_address_2, + use crate::core::address::testing::{ + established_address_1, established_address_2, nam, 
}; + use crate::key::testing::keypair_1; use crate::ledger::gas::VpGasMeter; use crate::ledger::ibc::storage::ibc_token; - use crate::token::storage_key::{ - balance_key, minted_balance_key, minter_key, - }; - use crate::token::Amount; - use crate::types::address::{Address, InternalAddress}; - use crate::types::key::testing::keypair_1; - use crate::types::storage::TxIndex; + use crate::storage::TxIndex; + use crate::token::storage_key::{balance_key, minted_balance_key}; use crate::vm::wasm::compilation_cache::common::testing::cache as wasm_cache; const ADDRESS: Address = Address::Internal(InternalAddress::Multitoken); - fn dummy_tx(wl_storage: &TestWlStorage) -> Tx { + fn dummy_tx(state: &TestState) -> Tx { let tx_code = vec![]; let tx_data = vec![]; let mut tx = Tx::from_type(TxType::Raw); - tx.header.chain_id = wl_storage.storage.chain_id.clone(); + tx.header.chain_id = state.in_mem().chain_id.clone(); tx.set_code(Code::new(tx_code, None)); tx.set_data(Data::new(tx_data)); tx.add_section(Section::Signature(Signature::new( @@ -257,48 +251,48 @@ mod tests { #[test] fn test_valid_transfer() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); let sender = established_address_1(); let sender_key = balance_key(&nam(), &sender); let amount = Amount::native_whole(100); - wl_storage - .storage - .write(&sender_key, amount.serialize_to_vec()) + state + .db_write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); // transfer 10 let amount = Amount::native_whole(90); - wl_storage - .write_log + state + .write_log_mut() .write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(sender_key); let receiver = established_address_2(); let receiver_key = balance_key(&nam(), &receiver); let amount = Amount::native_whole(10); - wl_storage - .write_log + state + .write_log_mut() .write(&receiver_key, amount.serialize_to_vec()) .expect("write failed"); 
keys_changed.insert(receiver_key); let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let mut verifiers = BTreeSet::new(); verifiers.insert(sender); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -313,21 +307,20 @@ mod tests { #[test] fn test_invalid_transfer() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); let sender = established_address_1(); let sender_key = balance_key(&nam(), &sender); let amount = Amount::native_whole(100); - wl_storage - .storage - .write(&sender_key, amount.serialize_to_vec()) + state + .db_write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); // transfer 10 let amount = Amount::native_whole(90); - wl_storage - .write_log + state + .write_log_mut() .write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(sender_key); @@ -335,26 +328,27 @@ mod tests { let receiver_key = balance_key(&nam(), &receiver); // receive more than 10 let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&receiver_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(receiver_key); let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let verifiers 
= BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -369,7 +363,7 @@ mod tests { #[test] fn test_valid_mint() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); // IBC token @@ -379,15 +373,15 @@ mod tests { let target = established_address_1(); let target_key = balance_key(&token, &target); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&token); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -395,28 +389,29 @@ mod tests { // minter let minter = Address::Internal(InternalAddress::Ibc); let minter_key = minter_key(&token); - wl_storage - .write_log + state + .write_log_mut() .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let mut verifiers = BTreeSet::new(); // for the minter verifiers.insert(minter); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -431,7 +426,7 @@ mod tests { #[test] 
fn test_invalid_mint() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); // mint 100 @@ -439,15 +434,15 @@ mod tests { let target_key = balance_key(&nam(), &target); // mint more than 100 let amount = Amount::native_whole(1000); - wl_storage - .write_log + state + .write_log_mut() .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&nam()); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -455,28 +450,29 @@ mod tests { // minter let minter = nam(); let minter_key = minter_key(&nam()); - wl_storage - .write_log + state + .write_log_mut() .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let mut verifiers = BTreeSet::new(); // for the minter verifiers.insert(minter); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -491,7 +487,7 @@ mod tests { #[test] fn test_no_minter() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); // IBC token @@ -501,15 +497,15 @@ mod tests { let target = established_address_1(); let target_key = balance_key(&token, &target); let amount = Amount::native_whole(100); - wl_storage - 
.write_log + state + .write_log_mut() .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&token); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -517,19 +513,20 @@ mod tests { // no minter is set let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -544,7 +541,7 @@ mod tests { #[test] fn test_invalid_minter() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); // IBC token @@ -554,15 +551,15 @@ mod tests { let target = established_address_1(); let target_key = balance_key(&token, &target); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&token); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -570,28 +567,29 @@ mod tests { // invalid minter let minter = established_address_1(); let minter_key = minter_key(&token); - wl_storage - .write_log + state + .write_log_mut() 
.write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let mut verifiers = BTreeSet::new(); // for the minter verifiers.insert(minter); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -606,34 +604,35 @@ mod tests { #[test] fn test_invalid_minter_update() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); let minter_key = minter_key(&nam()); let minter = established_address_1(); - wl_storage - .write_log + state + .write_log_mut() .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let mut verifiers = BTreeSet::new(); // for the minter verifiers.insert(minter); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -648,7 +647,7 @@ mod tests { #[test] fn test_invalid_key_update() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut 
keys_changed = BTreeSet::new(); let key = Key::from( @@ -656,27 +655,28 @@ mod tests { ) .push(&"invalid_segment".to_string()) .unwrap(); - wl_storage - .write_log + state + .write_log_mut() .write(&key, 0.serialize_to_vec()) .expect("write failed"); keys_changed.insert(key); let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, diff --git a/crates/namada/src/ledger/native_vp/parameters.rs b/crates/namada/src/ledger/native_vp/parameters.rs index b22f56260e..4fe0f11f76 100644 --- a/crates/namada/src/ledger/native_vp/parameters.rs +++ b/crates/namada/src/ledger/native_vp/parameters.rs @@ -2,8 +2,9 @@ use std::collections::BTreeSet; -use namada_core::types::address::Address; -use namada_core::types::storage::Key; +use namada_core::address::Address; +use namada_core::storage::Key; +use namada_state::StateRead; use namada_tx::Tx; use thiserror::Error; @@ -21,20 +22,18 @@ pub enum Error { pub type Result = std::result::Result; /// Parameters VP -pub struct ParametersVp<'a, DB, H, CA> +pub struct ParametersVp<'a, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: namada_state::StorageHasher, + S: StateRead, CA: WasmCacheAccess, { /// Context to interact with the host structures. 
- pub ctx: Ctx<'a, DB, H, CA>, + pub ctx: Ctx<'a, S, CA>, } -impl<'a, DB, H, CA> NativeVp for ParametersVp<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for ParametersVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; diff --git a/crates/namada/src/ledger/pgf/mod.rs b/crates/namada/src/ledger/pgf/mod.rs index eb7ec70a98..264f96691b 100644 --- a/crates/namada/src/ledger/pgf/mod.rs +++ b/crates/namada/src/ledger/pgf/mod.rs @@ -7,13 +7,14 @@ use std::collections::BTreeSet; use namada_governance::pgf::storage::keys as pgf_storage; use namada_governance::{is_proposal_accepted, pgf}; +use namada_state::StateRead; use namada_tx::Tx; use thiserror::Error; +use crate::address::{Address, InternalAddress}; use crate::ledger::native_vp; use crate::ledger::native_vp::{Ctx, NativeVp}; -use crate::types::address::{Address, InternalAddress}; -use crate::types::storage::Key; +use crate::storage::Key; use crate::vm::WasmCacheAccess; /// for handling Pgf NativeVP errors @@ -30,20 +31,18 @@ pub enum Error { } /// Pgf VP -pub struct PgfVp<'a, DB, H, CA> +pub struct PgfVp<'a, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: namada_state::StorageHasher, + S: StateRead, CA: WasmCacheAccess, { /// Context to interact with the host structures. 
- pub ctx: Ctx<'a, DB, H, CA>, + pub ctx: Ctx<'a, S, CA>, } -impl<'a, DB, H, CA> NativeVp for PgfVp<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for PgfVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -111,10 +110,9 @@ where } } -impl<'a, DB, H, CA> PgfVp<'a, DB, H, CA> +impl<'a, S, CA> PgfVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Validate a governance parameter diff --git a/crates/namada/src/ledger/pgf/utils.rs b/crates/namada/src/ledger/pgf/utils.rs index 8132de32af..6f6153885e 100644 --- a/crates/namada/src/ledger/pgf/utils.rs +++ b/crates/namada/src/ledger/pgf/utils.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use namada_core::types::address::Address; +use namada_core::address::Address; use crate::ledger::events::EventType; use crate::token; diff --git a/crates/namada/src/ledger/pos/mod.rs b/crates/namada/src/ledger/pos/mod.rs index a7e8fa0d22..7d7e552b63 100644 --- a/crates/namada/src/ledger/pos/mod.rs +++ b/crates/namada/src/ledger/pos/mod.rs @@ -2,11 +2,9 @@ pub mod vp; -use std::convert::TryFrom; - -use namada_core::types::address; -pub use namada_core::types::dec::Dec; -pub use namada_core::types::key::common; +use namada_core::address; +pub use namada_core::dec::Dec; +pub use namada_core::key::common; pub use namada_proof_of_stake::parameters::{OwnedPosParams, PosParams}; pub use namada_proof_of_stake::pos_queries::*; pub use namada_proof_of_stake::storage::*; @@ -16,8 +14,8 @@ pub use namada_proof_of_stake::{staking_token_address, types}; pub use vp::PosVP; pub use {namada_proof_of_stake, namada_state}; +use crate::address::{Address, InternalAddress}; pub use crate::token; -use crate::types::address::{Address, InternalAddress}; /// Address of the 
PoS account implemented as a native VP pub const ADDRESS: Address = address::POS; diff --git a/crates/namada/src/ledger/pos/vp.rs b/crates/namada/src/ledger/pos/vp.rs index 69ec002654..9a0de30c83 100644 --- a/crates/namada/src/ledger/pos/vp.rs +++ b/crates/namada/src/ledger/pos/vp.rs @@ -13,14 +13,14 @@ pub use namada_proof_of_stake::types; // is_validator_address_raw_hash_key, // is_validator_max_commission_rate_change_key, // }; -use namada_state::StorageHasher; +use namada_state::StateRead; use namada_state::StorageRead; use namada_tx::Tx; use thiserror::Error; +use crate::address::{Address, InternalAddress}; use crate::ledger::native_vp::{self, Ctx, NativeVp}; -use crate::types::address::{Address, InternalAddress}; -use crate::types::storage::{Key, KeySeg}; +use crate::storage::{Key, KeySeg}; use crate::vm::WasmCacheAccess; #[allow(missing_docs)] @@ -34,32 +34,29 @@ pub enum Error { pub type Result = std::result::Result; /// Proof-of-Stake validity predicate -pub struct PosVP<'a, DB, H, CA> +pub struct PosVP<'a, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: WasmCacheAccess, { /// Context to interact with the host structures. - pub ctx: Ctx<'a, DB, H, CA>, + pub ctx: Ctx<'a, S, CA>, } -impl<'a, DB, H, CA> PosVP<'a, DB, H, CA> +impl<'a, S, CA> PosVP<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Instantiate a `PosVP`. 
- pub fn new(ctx: Ctx<'a, DB, H, CA>) -> Self { + pub fn new(ctx: Ctx<'a, S, CA>) -> Self { Self { ctx } } } -impl<'a, DB, H, CA> NativeVp for PosVP<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for PosVP<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -95,7 +92,7 @@ where { return Ok(false); } - } else if key.segments.get(0) == Some(&addr.to_db_key()) { + } else if key.segments.first() == Some(&addr.to_db_key()) { // Unknown changes to this address space are disallowed // tracing::info!("PoS unrecognized key change {} rejected", // key); diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index 9ffa91b6c9..6a7991ef65 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -1,15 +1,17 @@ //! The ledger's protocol +use std::cell::RefCell; use std::collections::BTreeSet; +use std::fmt::Debug; use borsh_ext::BorshSerializeExt; use eyre::{eyre, WrapErr}; use masp_primitives::transaction::Transaction; -use namada_core::types::hash::Hash; -use namada_core::types::storage::Key; +use namada_core::hash::Hash; +use namada_core::storage::Key; +use namada_core::validity_predicate::VpSentinel; use namada_gas::TxGasMeter; use namada_sdk::tx::TX_TRANSFER_WASM; -use namada_state::wl_storage::WriteLogAndStorage; -use namada_state::StorageRead; +use namada_state::StorageWrite; use namada_tx::data::protocol::ProtocolTxType; use namada_tx::data::{ DecryptedTx, GasLimit, TxResult, TxType, VpsResult, WrapperTx, @@ -19,6 +21,7 @@ use namada_vote_ext::EthereumTxData; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use thiserror::Error; +use crate::address::{Address, InternalAddress}; use crate::ledger::gas::{GasMetering, VpGasMeter}; use crate::ledger::governance::GovernanceVp; use 
crate::ledger::native_vp::ethereum_bridge::bridge_pool_vp::BridgePoolVp; @@ -31,12 +34,10 @@ use crate::ledger::native_vp::parameters::{self, ParametersVp}; use crate::ledger::native_vp::{self, NativeVp}; use crate::ledger::pgf::PgfVp; use crate::ledger::pos::{self, PosVP}; -use crate::state::write_log::WriteLog; -use crate::state::{DBIter, State, StorageHasher, WlStorage, DB}; +use crate::state::{DBIter, State, StorageHasher, StorageRead, WlState, DB}; +use crate::storage; +use crate::storage::TxIndex; use crate::token::Amount; -use crate::types::address::{Address, InternalAddress}; -use crate::types::storage; -use crate::types::storage::TxIndex; use crate::vm::wasm::{TxCache, VpCache}; use crate::vm::{self, wasm, WasmCacheAccess}; @@ -101,32 +102,37 @@ pub enum Error { /// Shell parameters for running wasm transactions. #[allow(missing_docs)] -pub struct ShellParams<'a, CA, WLS> +#[derive(Debug)] +pub struct ShellParams<'a, S, D, H, CA> where + S: State + Sync, + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, - WLS: WriteLogAndStorage + StorageRead, { - tx_gas_meter: &'a mut TxGasMeter, - wl_storage: &'a mut WLS, - vp_wasm_cache: &'a mut VpCache, - tx_wasm_cache: &'a mut TxCache, + pub tx_gas_meter: &'a RefCell, + pub state: &'a mut S, + pub vp_wasm_cache: &'a mut VpCache, + pub tx_wasm_cache: &'a mut TxCache, } -impl<'a, CA, WLS> ShellParams<'a, CA, WLS> +impl<'a, S, D, H, CA> ShellParams<'a, S, D, H, CA> where + S: State + Sync, + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, - WLS: WriteLogAndStorage + StorageRead, { /// Create a new instance of `ShellParams` pub fn new( - tx_gas_meter: &'a mut TxGasMeter, - wl_storage: &'a mut WLS, + tx_gas_meter: &'a RefCell, + state: &'a mut S, vp_wasm_cache: &'a mut VpCache, tx_wasm_cache: &'a mut TxCache, ) -> Self { Self { tx_gas_meter, - wl_storage, + state, 
vp_wasm_cache, tx_wasm_cache, } @@ -156,8 +162,8 @@ pub fn dispatch_tx<'a, D, H, CA>( tx: Tx, tx_bytes: &'a [u8], tx_index: TxIndex, - tx_gas_meter: &'a mut TxGasMeter, - wl_storage: &'a mut WlStorage, + tx_gas_meter: &'a RefCell, + state: &'a mut WlState, vp_wasm_cache: &'a mut VpCache, tx_wasm_cache: &'a mut TxCache, wrapper_args: Option<&mut WrapperArgs>, @@ -174,13 +180,13 @@ where &tx_index, ShellParams { tx_gas_meter, - wl_storage, + state, vp_wasm_cache, tx_wasm_cache, }, ), TxType::Protocol(protocol_tx) => { - apply_protocol_tx(protocol_tx.tx, tx.data(), wl_storage) + apply_protocol_tx(protocol_tx.tx, tx.data(), state) } TxType::Wrapper(ref wrapper) => { let fee_unshielding_transaction = @@ -192,14 +198,14 @@ where tx_bytes, ShellParams { tx_gas_meter, - wl_storage, + state, vp_wasm_cache, tx_wasm_cache, }, wrapper_args, )?; Ok(TxResult { - gas_used: tx_gas_meter.get_tx_consumed_gas(), + gas_used: tx_gas_meter.borrow().get_tx_consumed_gas(), changed_keys, vps_result: VpsResult::default(), initialized_accounts: vec![], @@ -235,25 +241,26 @@ where /// - gas accounting /// /// Returns the set of changed storage keys. 
-pub(crate) fn apply_wrapper_tx<'a, D, H, CA, WLS>( +pub(crate) fn apply_wrapper_tx( tx: Tx, wrapper: &WrapperTx, fee_unshield_transaction: Option, tx_bytes: &[u8], - mut shell_params: ShellParams<'a, CA, WLS>, + mut shell_params: ShellParams<'_, S, D, H, CA>, wrapper_args: Option<&mut WrapperArgs>, ) -> Result> where - CA: 'static + WasmCacheAccess + Sync, + S: State + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, - WLS: WriteLogAndStorage + StorageRead, + CA: 'static + WasmCacheAccess + Sync, { let mut changed_keys = BTreeSet::default(); // Write wrapper tx hash to storage shell_params - .wl_storage + .state + .write_log_mut() .write_tx_hash(tx.header_hash()) .expect("Error while writing tx hash to storage"); @@ -269,6 +276,7 @@ where // Account for gas shell_params .tx_gas_meter + .borrow_mut() .add_wrapper_gas(tx_bytes) .map_err(|err| Error::GasError(err.to_string()))?; @@ -299,22 +307,22 @@ pub fn get_fee_unshielding_transaction( /// - Fee amount overflows /// - Not enough funds are available to pay the entire amount of the fee /// - The accumulated fee amount to be credited to the block proposer overflows -fn charge_fee<'a, D, H, CA, WLS>( +fn charge_fee<'a, S, D, H, CA>( wrapper: &WrapperTx, masp_transaction: Option, - shell_params: &mut ShellParams<'a, CA, WLS>, + shell_params: &mut ShellParams<'a, S, D, H, CA>, changed_keys: &mut BTreeSet, wrapper_args: Option<&mut WrapperArgs>, ) -> Result<()> where - CA: 'static + WasmCacheAccess + Sync, + S: State + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, - WLS: WriteLogAndStorage + StorageRead, + CA: 'static + WasmCacheAccess + Sync, { let ShellParams { tx_gas_meter: _, - wl_storage, + state, vp_wasm_cache, tx_wasm_cache, } = shell_params; @@ -323,22 +331,22 @@ where let requires_fee_unshield = if let Some(transaction) = masp_transaction { // The unshielding tx does not charge gas, instantiate a // custom gas meter for 
this step - let mut tx_gas_meter = - TxGasMeter::new(GasLimit::from( - wl_storage + let tx_gas_meter = + RefCell::new(TxGasMeter::new(GasLimit::from( + state .read::( &namada_parameters::storage::get_fee_unshielding_gas_limit_key( ), ) .expect("Error reading the storage") .expect("Missing fee unshielding gas limit in storage")), - ); + )); // If it fails, do not return early // from this function but try to take the funds from the unshielded // balance match wrapper.generate_fee_unshielding( - get_transfer_hash_from_storage(*wl_storage), + get_transfer_hash_from_storage(*state), Some(TX_TRANSFER_WASM.to_string()), transaction, ) { @@ -346,13 +354,13 @@ where // NOTE: A clean tx write log must be provided to this call // for a correct vp validation. Block write log, instead, // should contain any prior changes (if any) - wl_storage.write_log_mut().precommit_tx(); + state.write_log_mut().precommit_tx(); match apply_wasm_tx( fee_unshielding_tx, &TxIndex::default(), ShellParams { - tx_gas_meter: &mut tx_gas_meter, - wl_storage: *wl_storage, + tx_gas_meter: &tx_gas_meter, + state: *state, vp_wasm_cache, tx_wasm_cache, }, @@ -361,7 +369,7 @@ where // NOTE: do not commit yet cause this could be // exploited to get free unshieldings if !result.is_accepted() { - wl_storage.write_log_mut().drop_tx_keep_precommit(); + state.write_log_mut().drop_tx_keep_precommit(); tracing::error!( "The unshielding tx is invalid, some VPs \ rejected it: {:#?}", @@ -370,7 +378,7 @@ where } } Err(e) => { - wl_storage.write_log_mut().drop_tx_keep_precommit(); + state.write_log_mut().drop_tx_keep_precommit(); tracing::error!( "The unshielding tx is invalid, wasm run failed: \ {}", @@ -392,14 +400,14 @@ where Some(WrapperArgs { block_proposer, is_committed_fee_unshield: _, - }) => transfer_fee(*wl_storage, block_proposer, wrapper)?, - None => check_fees(*wl_storage, wrapper)?, + }) => transfer_fee(*state, block_proposer, wrapper)?, + None => check_fees(*state, wrapper)?, } - 
changed_keys.extend(wl_storage.write_log_mut().get_keys_with_precommit()); + changed_keys.extend(state.write_log_mut().get_keys_with_precommit()); // Commit tx write log even in case of subsequent errors - wl_storage.write_log_mut().commit_tx(); + state.write_log_mut().commit_tx(); // Update the flag only after the fee payment has been committed if let Some(args) = wrapper_args { args.is_committed_fee_unshield = requires_fee_unshield; @@ -410,16 +418,16 @@ where /// Perform the actual transfer of fess from the fee payer to the block /// proposer. -pub fn transfer_fee( - wl_storage: &mut WLS, +pub fn transfer_fee( + state: &mut S, block_proposer: &Address, wrapper: &WrapperTx, ) -> Result<()> where - WLS: WriteLogAndStorage + StorageRead, + S: State + StorageRead + StorageWrite, { let balance = crate::token::read_balance( - wl_storage, + state, &wrapper.fee.token, &wrapper.fee_payer(), ) @@ -427,15 +435,12 @@ where match wrapper.get_tx_fee() { Ok(fees) => { - let fees = crate::token::denom_to_amount( - fees, - &wrapper.fee.token, - wl_storage, - ) - .map_err(|e| Error::FeeError(e.to_string()))?; + let fees = + crate::token::denom_to_amount(fees, &wrapper.fee.token, state) + .map_err(|e| Error::FeeError(e.to_string()))?; if balance.checked_sub(fees).is_some() { token_transfer( - wl_storage, + state, &wrapper.fee.token, &wrapper.fee_payer(), block_proposer, @@ -454,7 +459,7 @@ where shouldn't happen." ); token_transfer( - wl_storage, + state, &wrapper.fee.token, &wrapper.fee_payer(), block_proposer, @@ -489,17 +494,17 @@ where /// `crate::token::transfer` this function updates the tx write log and /// not the block write log. 
fn token_transfer( - wl_storage: &mut WLS, + state: &mut WLS, token: &Address, src: &Address, dest: &Address, amount: Amount, ) -> Result<()> where - WLS: WriteLogAndStorage + StorageRead, + WLS: State + StorageRead, { let src_key = crate::token::storage_key::balance_key(token, src); - let src_balance = crate::token::read_balance(wl_storage, token, src) + let src_balance = crate::token::read_balance(state, token, src) .expect("Token balance read in protocol must not fail"); match src_balance.checked_sub(amount) { Some(new_src_balance) => { @@ -507,16 +512,15 @@ where return Ok(()); } let dest_key = crate::token::storage_key::balance_key(token, dest); - let dest_balance = - crate::token::read_balance(wl_storage, token, dest) - .expect("Token balance read in protocol must not fail"); + let dest_balance = crate::token::read_balance(state, token, dest) + .expect("Token balance read in protocol must not fail"); match dest_balance.checked_add(amount) { Some(new_dest_balance) => { - wl_storage + state .write_log_mut() .write(&src_key, new_src_balance.serialize_to_vec()) .map_err(|e| Error::FeeError(e.to_string()))?; - match wl_storage + match state .write_log_mut() .write(&dest_key, new_dest_balance.serialize_to_vec()) { @@ -535,12 +539,12 @@ where } /// Check if the fee payer has enough transparent balance to pay fees -pub fn check_fees(wl_storage: &WLS, wrapper: &WrapperTx) -> Result<()> +pub fn check_fees(state: &S, wrapper: &WrapperTx) -> Result<()> where - WLS: WriteLogAndStorage + StorageRead, + S: State + StorageRead, { let balance = crate::token::read_balance( - wl_storage, + state, &wrapper.fee.token, &wrapper.fee_payer(), ) @@ -550,9 +554,8 @@ where .get_tx_fee() .map_err(|e| Error::FeeError(e.to_string()))?; - let fees = - crate::token::denom_to_amount(fees, &wrapper.fee.token, wl_storage) - .map_err(|e| Error::FeeError(e.to_string()))?; + let fees = crate::token::denom_to_amount(fees, &wrapper.fee.token, state) + .map_err(|e| Error::FeeError(e.to_string()))?; 
if balance.checked_sub(fees).is_some() { Ok(()) } else { @@ -564,37 +567,27 @@ where /// Apply a transaction going via the wasm environment. Gas will be metered and /// validity predicates will be triggered in the normal way. -pub fn apply_wasm_tx<'a, D, H, CA, WLS>( +pub fn apply_wasm_tx<'a, S, D, H, CA>( tx: Tx, tx_index: &TxIndex, - shell_params: ShellParams<'a, CA, WLS>, + shell_params: ShellParams<'a, S, D, H, CA>, ) -> Result where - CA: 'static + WasmCacheAccess + Sync, + S: State + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, - WLS: WriteLogAndStorage + StorageRead, + CA: 'static + WasmCacheAccess + Sync, { let ShellParams { tx_gas_meter, - wl_storage, + state, vp_wasm_cache, tx_wasm_cache, } = shell_params; - let (tx_gas_meter, storage, write_log, vp_wasm_cache, tx_wasm_cache) = { - let (write_log, storage) = wl_storage.split_borrow(); - ( - tx_gas_meter, - storage, - write_log, - vp_wasm_cache, - tx_wasm_cache, - ) - }; - let tx_hash = tx.raw_header_hash(); - if let Some(true) = write_log.has_replay_protection_entry(&tx_hash) { + if let Some(true) = state.write_log().has_replay_protection_entry(&tx_hash) + { // If the same transaction has already been applied in this block, skip // execution and return return Err(Error::ReplayAttempt(tx_hash)); @@ -603,9 +596,8 @@ where let verifiers = execute_tx( &tx, tx_index, - storage, + state, tx_gas_meter, - write_log, vp_wasm_cache, tx_wasm_cache, )?; @@ -613,17 +605,16 @@ where let vps_result = check_vps(CheckVps { tx: &tx, tx_index, - storage, - tx_gas_meter, - write_log, + state, + tx_gas_meter: &mut tx_gas_meter.borrow_mut(), verifiers_from_tx: &verifiers, vp_wasm_cache, })?; - let gas_used = tx_gas_meter.get_tx_consumed_gas(); - let initialized_accounts = write_log.get_initialized_accounts(); - let changed_keys = write_log.get_keys(); - let ibc_events = write_log.take_ibc_events(); + let gas_used = tx_gas_meter.borrow().get_tx_consumed_gas(); + let 
initialized_accounts = state.write_log().get_initialized_accounts(); + let changed_keys = state.write_log().get_keys(); + let ibc_events = state.write_log_mut().take_ibc_events(); Ok(TxResult { gas_used, @@ -637,10 +628,7 @@ where /// Returns [`Error::DisallowedTx`] when the given tx is inner (decrypted) tx /// and its code `Hash` is not included in the `tx_allowlist` parameter. -pub fn check_tx_allowed( - tx: &Tx, - wl_storage: &WlStorage, -) -> Result<()> +pub fn check_tx_allowed(tx: &Tx, state: &WlState) -> Result<()> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -650,11 +638,8 @@ where .get_section(tx.code_sechash()) .and_then(|x| Section::code_sec(&x)) { - if crate::parameters::is_tx_allowed( - wl_storage, - &code_sec.code.hash(), - ) - .map_err(Error::StorageError)? + if crate::parameters::is_tx_allowed(state, &code_sec.code.hash()) + .map_err(Error::StorageError)? { return Ok(()); } @@ -673,7 +658,7 @@ where pub(crate) fn apply_protocol_tx( tx: ProtocolTxType, data: Option>, - storage: &mut WlStorage, + state: &mut WlState, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -683,9 +668,9 @@ where use namada_vote_ext::{ethereum_events, validator_set_update}; let Some(data) = data else { - return Err(Error::ProtocolTxError( - eyre!("Protocol tx data must be present")), - ); + return Err(Error::ProtocolTxError(eyre!( + "Protocol tx data must be present" + ))); }; let ethereum_tx_data = EthereumTxData::deserialize(&tx, &data) .wrap_err_with(|| { @@ -702,15 +687,12 @@ where ) => { let ethereum_events::VextDigest { events, .. 
} = ethereum_events::VextDigest::singleton(ext); - transactions::ethereum_events::apply_derived_tx(storage, events) + transactions::ethereum_events::apply_derived_tx(state, events) .map_err(Error::ProtocolTxError) } EthereumTxData::BridgePoolVext(ext) => { - transactions::bridge_pool_roots::apply_derived_tx( - storage, - ext.into(), - ) - .map_err(Error::ProtocolTxError) + transactions::bridge_pool_roots::apply_derived_tx(state, ext.into()) + .map_err(Error::ProtocolTxError) } EthereumTxData::ValSetUpdateVext(ext) => { // NOTE(feature = "abcipp"): with ABCI++, we can write the @@ -720,7 +702,7 @@ where // to reach a complete proof. let signing_epoch = ext.data.signing_epoch; transactions::validator_set_update::aggregate_votes( - storage, + state, validator_set_update::VextDigest::singleton(ext), signing_epoch, ) @@ -741,23 +723,22 @@ where /// Execute a transaction code. Returns verifiers requested by the transaction. #[allow(clippy::too_many_arguments)] -fn execute_tx( +fn execute_tx( tx: &Tx, tx_index: &TxIndex, - storage: &State, - tx_gas_meter: &mut TxGasMeter, - write_log: &mut WriteLog, + state: &mut S, + tx_gas_meter: &RefCell, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result> where + S: State, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, { wasm::run::tx( - storage, - write_log, + state, tx_gas_meter, tx_index, tx, @@ -772,48 +753,44 @@ where } /// Arguments to [`check_vps`]. -struct CheckVps<'a, D, H, CA> +struct CheckVps<'a, S, CA> where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, + S: State, CA: 'static + WasmCacheAccess + Sync, { tx: &'a Tx, tx_index: &'a TxIndex, - storage: &'a State, + state: &'a S, tx_gas_meter: &'a mut TxGasMeter, - write_log: &'a WriteLog, verifiers_from_tx: &'a BTreeSet
, vp_wasm_cache: &'a mut VpCache, } /// Check the acceptance of a transaction by validity predicates -fn check_vps( +fn check_vps( CheckVps { tx, tx_index, - storage, + state, tx_gas_meter, - write_log, verifiers_from_tx, vp_wasm_cache, - }: CheckVps<'_, D, H, CA>, + }: CheckVps<'_, S, CA>, ) -> Result where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, + S: State + Sync, CA: 'static + WasmCacheAccess + Sync, { - let (verifiers, keys_changed) = - write_log.verifiers_and_changed_keys(verifiers_from_tx); + let (verifiers, keys_changed) = state + .write_log() + .verifiers_and_changed_keys(verifiers_from_tx); let vps_result = execute_vps( verifiers, keys_changed, tx, tx_index, - storage, - write_log, + state, tx_gas_meter, vp_wasm_cache, )?; @@ -828,31 +805,31 @@ where /// Execute verifiers' validity predicates #[allow(clippy::too_many_arguments)] -fn execute_vps( +fn execute_vps( verifiers: BTreeSet
, keys_changed: BTreeSet, tx: &Tx, tx_index: &TxIndex, - storage: &State, - write_log: &WriteLog, + state: &S, tx_gas_meter: &TxGasMeter, vp_wasm_cache: &mut VpCache, ) -> Result where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, + S: State + Sync, CA: 'static + WasmCacheAccess + Sync, { let vps_result = verifiers .par_iter() .try_fold(VpsResult::default, |mut result, addr| { - let mut gas_meter = VpGasMeter::new_from_tx_meter(tx_gas_meter); + let gas_meter = + RefCell::new(VpGasMeter::new_from_tx_meter(tx_gas_meter)); let accept = match &addr { Address::Implicit(_) | Address::Established(_) => { - let (vp_hash, gas) = storage + let (vp_hash, gas) = state .validity_predicate(addr) .map_err(Error::StateError)?; gas_meter + .borrow_mut() .consume(gas) .map_err(|err| Error::GasError(err.to_string()))?; let Some(vp_code_hash) = vp_hash else { @@ -869,9 +846,8 @@ where tx, tx_index, addr, - storage, - write_log, - &mut gas_meter, + state, + &gas_meter, &keys_changed, &verifiers, vp_wasm_cache.clone(), @@ -885,185 +861,94 @@ where }) } Address::Internal(internal_addr) => { + let sentinel = RefCell::new(VpSentinel::default()); let ctx = native_vp::Ctx::new( addr, - storage, - write_log, + state, tx, tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache.clone(), ); - let (accepted, sentinel): (Result, _) = - match internal_addr { - InternalAddress::PoS => { - let pos = PosVP { ctx }; - let verifiers_addr_ref = &verifiers; - let pos_ref = &pos; - // TODO this is temporarily ran in a new thread - // to - // avoid crashing the ledger (required - // `UnwindSafe` - // and `RefUnwindSafe` in - // namada/src/ledger/pos/vp.rs) - let keys_changed_ref = &keys_changed; - let result = pos_ref - .validate_tx( - tx, - keys_changed_ref, - verifiers_addr_ref, - ) - .map_err(Error::PosNativeVpError); - // Take the gas meter and sentinel - // back - // out of the context - gas_meter = 
pos.ctx.gas_meter.into_inner(); - (result, pos.ctx.sentinel.into_inner()) - } - InternalAddress::Ibc => { - let ibc = Ibc { ctx }; - let result = ibc - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::IbcNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = ibc.ctx.gas_meter.into_inner(); - (result, ibc.ctx.sentinel.into_inner()) - } - InternalAddress::Parameters => { - let parameters = ParametersVp { ctx }; - let result = parameters - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::ParametersNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = - parameters.ctx.gas_meter.into_inner(); - (result, parameters.ctx.sentinel.into_inner()) - } - InternalAddress::PosSlashPool => { - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = ctx.gas_meter.into_inner(); - ( - Err(Error::AccessForbidden( - (*internal_addr).clone(), - )), - ctx.sentinel.into_inner(), - ) - } - InternalAddress::Governance => { - let governance = GovernanceVp { ctx }; - let result = governance - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::GovernanceNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = - governance.ctx.gas_meter.into_inner(); - (result, governance.ctx.sentinel.into_inner()) - } - InternalAddress::Multitoken => { - let multitoken = MultitokenVp { ctx }; - let result = multitoken - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::MultitokenNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = - multitoken.ctx.gas_meter.into_inner(); - (result, multitoken.ctx.sentinel.into_inner()) - } - InternalAddress::EthBridge => { - let bridge = EthBridge { ctx }; - let result = bridge - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::EthBridgeNativeVpError); - // Take the gas meter and the 
sentinel - // back - // out of the context - gas_meter = bridge.ctx.gas_meter.into_inner(); - (result, bridge.ctx.sentinel.into_inner()) - } - InternalAddress::EthBridgePool => { - let bridge_pool = BridgePoolVp { ctx }; - let result = bridge_pool - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::BridgePoolNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = - bridge_pool.ctx.gas_meter.into_inner(); - (result, bridge_pool.ctx.sentinel.into_inner()) - } - InternalAddress::Pgf => { - let pgf_vp = PgfVp { ctx }; - let result = pgf_vp - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::PgfNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = pgf_vp.ctx.gas_meter.into_inner(); - (result, pgf_vp.ctx.sentinel.into_inner()) - } - InternalAddress::Nut(_) => { - let non_usable_tokens = NonUsableTokens { ctx }; - let result = non_usable_tokens - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::NutNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = non_usable_tokens - .ctx - .gas_meter - .into_inner(); - ( - result, - non_usable_tokens.ctx.sentinel.into_inner(), - ) - } - InternalAddress::IbcToken(_) - | InternalAddress::Erc20(_) => { - // The address should be a part of a multitoken - // key - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = ctx.gas_meter.into_inner(); - ( - Ok(verifiers.contains(&Address::Internal( - InternalAddress::Multitoken, - ))), - ctx.sentinel.into_inner(), - ) - } - InternalAddress::Masp => { - let masp = MaspVp { ctx }; - let result = masp - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::MaspNativeVpError); - // Take the gas meter and the sentinel back out - // of the context - gas_meter = masp.ctx.gas_meter.into_inner(); - (result, masp.ctx.sentinel.into_inner()) - } - }; + let accepted: Result = match 
internal_addr { + InternalAddress::PoS => { + let pos = PosVP { ctx }; + pos.validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::PosNativeVpError) + } + InternalAddress::Ibc => { + let ibc = Ibc { ctx }; + ibc.validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::IbcNativeVpError) + } + InternalAddress::Parameters => { + let parameters = ParametersVp { ctx }; + parameters + .validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::ParametersNativeVpError) + } + InternalAddress::PosSlashPool => Err( + Error::AccessForbidden((*internal_addr).clone()), + ), + InternalAddress::Governance => { + let governance = GovernanceVp { ctx }; + governance + .validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::GovernanceNativeVpError) + } + InternalAddress::Multitoken => { + let multitoken = MultitokenVp { ctx }; + multitoken + .validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::MultitokenNativeVpError) + } + InternalAddress::EthBridge => { + let bridge = EthBridge { ctx }; + bridge + .validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::EthBridgeNativeVpError) + } + InternalAddress::EthBridgePool => { + let bridge_pool = BridgePoolVp { ctx }; + bridge_pool + .validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::BridgePoolNativeVpError) + } + InternalAddress::Pgf => { + let pgf_vp = PgfVp { ctx }; + pgf_vp + .validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::PgfNativeVpError) + } + InternalAddress::Nut(_) => { + let non_usable_tokens = NonUsableTokens { ctx }; + non_usable_tokens + .validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::NutNativeVpError) + } + InternalAddress::IbcToken(_) + | InternalAddress::Erc20(_) => { + // The address should be a part of a multitoken + // key + Ok(verifiers.contains(&Address::Internal( + InternalAddress::Multitoken, + ))) + } + InternalAddress::Masp => { + let masp = MaspVp { ctx }; + masp.validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::MaspNativeVpError) + 
} + }; accepted.map_err(|err| { // No need to check invalid sig because internal vps // don't check the signature - if sentinel.is_out_of_gas() { + if sentinel.borrow().is_out_of_gas() { Error::GasError(err.to_string()) } else { err @@ -1105,7 +990,7 @@ where result .gas_used - .set(gas_meter) + .set(gas_meter.into_inner()) .map_err(|err| Error::GasError(err.to_string()))?; Ok(result) @@ -1151,15 +1036,13 @@ mod tests { use borsh::BorshDeserialize; use eyre::Result; - use namada_core::types::chain::ChainId; - use namada_core::types::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; - use namada_core::types::ethereum_events::{ - EthereumEvent, TransferToNamada, - }; - use namada_core::types::keccak::keccak_hash; - use namada_core::types::storage::BlockHeight; - use namada_core::types::voting_power::FractionalVotingPower; - use namada_core::types::{address, key}; + use namada_core::chain::ChainId; + use namada_core::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; + use namada_core::ethereum_events::{EthereumEvent, TransferToNamada}; + use namada_core::keccak::keccak_hash; + use namada_core::storage::BlockHeight; + use namada_core::voting_power::FractionalVotingPower; + use namada_core::{address, key}; use namada_ethereum_bridge::protocol::transactions::votes::{ EpochedVotingPower, Votes, }; @@ -1167,8 +1050,6 @@ mod tests { use namada_ethereum_bridge::storage::proof::EthereumProof; use namada_ethereum_bridge::storage::{vote_tallies, vp}; use namada_ethereum_bridge::test_utils; - use namada_state::StorageRead; - use namada_token::Amount; use namada_tx::{SignableEthMessage, Signed}; use namada_vote_ext::bridge_pool_roots::BridgePoolRootVext; use namada_vote_ext::ethereum_events::EthereumEventsVext; @@ -1177,14 +1058,14 @@ mod tests { fn apply_eth_tx( tx: EthereumTxData, - wl_storage: &mut WlStorage, + state: &mut WlState, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { let (data, tx) = tx.serialize(); - let 
tx_result = apply_protocol_tx(tx, Some(data), wl_storage)?; + let tx_result = apply_protocol_tx(tx, Some(data), state)?; Ok(tx_result) } @@ -1198,7 +1079,7 @@ mod tests { let validator_a_stake = Amount::native_whole(100); let validator_b_stake = Amount::native_whole(100); let total_stake = validator_a_stake + validator_b_stake; - let (mut wl_storage, _) = test_utils::setup_storage_with_validators( + let (mut state, _) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ (validator_a.clone(), validator_a_stake), (validator_b, validator_b_stake), @@ -1223,11 +1104,11 @@ mod tests { namada_vote_ext::ethereum_events::SignedVext(signed), ); - apply_eth_tx(tx.clone(), &mut wl_storage)?; - apply_eth_tx(tx, &mut wl_storage)?; + apply_eth_tx(tx.clone(), &mut state)?; + apply_eth_tx(tx, &mut state)?; let eth_msg_keys = vote_tallies::Keys::from(&event); - let seen_by_bytes = wl_storage.read_bytes(ð_msg_keys.seen_by())?; + let seen_by_bytes = state.read_bytes(ð_msg_keys.seen_by())?; let seen_by_bytes = seen_by_bytes.unwrap(); assert_eq!( Votes::try_from_slice(&seen_by_bytes)?, @@ -1236,7 +1117,7 @@ mod tests { // the vote should have only be applied once let voting_power: EpochedVotingPower = - wl_storage.read(ð_msg_keys.voting_power())?.unwrap(); + state.read(ð_msg_keys.voting_power())?.unwrap(); let expected = EpochedVotingPower::from([( 0.into(), FractionalVotingPower::HALF * total_stake, @@ -1256,18 +1137,18 @@ mod tests { let validator_a_stake = Amount::native_whole(100); let validator_b_stake = Amount::native_whole(100); let total_stake = validator_a_stake + validator_b_stake; - let (mut wl_storage, keys) = test_utils::setup_storage_with_validators( + let (mut state, keys) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ (validator_a.clone(), validator_a_stake), (validator_b, validator_b_stake), ]), ); - vp::bridge_pool::init_storage(&mut wl_storage); + vp::bridge_pool::init_storage(&mut state); - let root = 
wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); test_utils::commit_bridge_pool_root_at_height( - &mut wl_storage, + &mut state, &root, 100.into(), ); @@ -1283,22 +1164,21 @@ mod tests { } .sign(&signing_key); let tx = EthereumTxData::BridgePoolVext(vext); - apply_eth_tx(tx.clone(), &mut wl_storage)?; - apply_eth_tx(tx, &mut wl_storage)?; + apply_eth_tx(tx.clone(), &mut state)?; + apply_eth_tx(tx, &mut state)?; let bp_root_keys = vote_tallies::Keys::from(( &vote_tallies::BridgePoolRoot(EthereumProof::new((root, nonce))), 100.into(), )); - let root_seen_by_bytes = - wl_storage.read_bytes(&bp_root_keys.seen_by())?; + let root_seen_by_bytes = state.read_bytes(&bp_root_keys.seen_by())?; assert_eq!( Votes::try_from_slice(root_seen_by_bytes.as_ref().unwrap())?, Votes::from([(validator_a, BlockHeight(100))]) ); // the vote should have only be applied once let voting_power: EpochedVotingPower = - wl_storage.read(&bp_root_keys.voting_power())?.unwrap(); + state.read(&bp_root_keys.voting_power())?.unwrap(); let expected = EpochedVotingPower::from([( 0.into(), FractionalVotingPower::HALF * total_stake, @@ -1310,7 +1190,7 @@ mod tests { #[test] fn test_apply_wasm_tx_allowlist() { - let (mut wl_storage, _validators) = test_utils::setup_default_storage(); + let (mut state, _validators) = test_utils::setup_default_storage(); let mut tx = Tx::new(ChainId::default(), None); tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); @@ -1323,13 +1203,12 @@ mod tests { { let allowlist = vec![format!("{}-bad", tx_hash)]; crate::parameters::update_tx_allowlist_parameter( - &mut wl_storage, - allowlist, + &mut state, allowlist, ) .unwrap(); - wl_storage.commit_tx(); + state.commit_tx(); - let result = check_tx_allowed(&tx, &wl_storage); + let result = check_tx_allowed(&tx, &state); 
assert_matches!(result.unwrap_err(), Error::DisallowedTx); } @@ -1337,13 +1216,12 @@ mod tests { { let allowlist = vec![tx_hash.to_string()]; crate::parameters::update_tx_allowlist_parameter( - &mut wl_storage, - allowlist, + &mut state, allowlist, ) .unwrap(); - wl_storage.commit_tx(); + state.commit_tx(); - let result = check_tx_allowed(&tx, &wl_storage); + let result = check_tx_allowed(&tx, &state); if let Err(result) = result { assert!(!matches!(result, Error::DisallowedTx)); } diff --git a/crates/namada/src/ledger/storage/mod.rs b/crates/namada/src/ledger/storage/mod.rs index d78fd79ec8..dcae9a776d 100644 --- a/crates/namada/src/ledger/storage/mod.rs +++ b/crates/namada/src/ledger/storage/mod.rs @@ -1,3 +1,3 @@ //! Ledger's state storage with key-value backed store and a merkle tree -pub use namada_state::{write_log, PrefixIter, WlStorage, *}; +pub use namada_state::{write_log, PrefixIter, *}; diff --git a/crates/namada/src/ledger/vp_host_fns.rs b/crates/namada/src/ledger/vp_host_fns.rs index 5ebd1a635b..738a25deae 100644 --- a/crates/namada/src/ledger/vp_host_fns.rs +++ b/crates/namada/src/ledger/vp_host_fns.rs @@ -1,23 +1,25 @@ //! Host functions for VPs used for both native and WASM VPs. 
+use std::cell::RefCell; +use std::fmt::Debug; use std::num::TryFromIntError; -use namada_core::types::address::{Address, ESTABLISHED_ADDRESS_BYTES_LEN}; -use namada_core::types::hash::{Hash, HASH_LENGTH}; -use namada_core::types::storage::{ +use namada_core::address::{Address, ESTABLISHED_ADDRESS_BYTES_LEN}; +use namada_core::hash::{Hash, HASH_LENGTH}; +use namada_core::storage::{ BlockHash, BlockHeight, Epoch, Epochs, Header, Key, TxIndex, TX_INDEX_LENGTH, }; -use namada_core::types::validity_predicate::VpSentinel; +use namada_core::validity_predicate::VpSentinel; use namada_gas::MEMORY_ACCESS_GAS_PER_BYTE; use namada_state::write_log::WriteLog; -use namada_state::{write_log, State, StorageHasher}; +use namada_state::{write_log, DBIter, StateRead, DB}; use namada_tx::{Section, Tx}; use thiserror::Error; +use crate::ibc::IbcEvent; use crate::ledger::gas; use crate::ledger::gas::{GasMetering, VpGasMeter}; -use crate::types::ibc::IbcEvent; /// These runtime errors will abort VP execution immediately #[allow(missing_docs)] @@ -28,7 +30,7 @@ pub enum RuntimeError { #[error("Storage error: {0}")] StorageError(namada_state::Error), #[error("Storage data error: {0}")] - StorageDataError(crate::types::storage::Error), + StorageDataError(crate::storage::Error), #[error("Encoding error: {0}")] EncodingError(std::io::Error), #[error("Numeric conversion error: {0}")] @@ -50,12 +52,12 @@ pub type EnvResult = std::result::Result; /// Add a gas cost incured in a validity predicate pub fn add_gas( - gas_meter: &mut VpGasMeter, + gas_meter: &RefCell, used_gas: u64, - sentinel: &mut VpSentinel, + sentinel: &RefCell, ) -> EnvResult<()> { - gas_meter.consume(used_gas).map_err(|err| { - sentinel.set_out_of_gas(); + gas_meter.borrow_mut().consume(used_gas).map_err(|err| { + sentinel.borrow_mut().set_out_of_gas(); tracing::info!("Stopping VP execution because of gas error: {}", err); RuntimeError::OutOfGas(err) }) @@ -63,18 +65,16 @@ pub fn add_gas( /// Storage read prior state (before 
tx execution). It will try to read from the /// storage. -pub fn read_pre( - gas_meter: &mut VpGasMeter, - storage: &State, - write_log: &WriteLog, +pub fn read_pre( + gas_meter: &RefCell, + state: &S, key: &Key, - sentinel: &mut VpSentinel, + sentinel: &RefCell, ) -> EnvResult>> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { - let (log_val, gas) = write_log.read_pre(key); + let (log_val, gas) = state.write_log().read_pre(key); add_gas(gas_meter, gas, sentinel)?; match log_val { Some(write_log::StorageModification::Write { ref value }) => { @@ -96,7 +96,7 @@ where None => { // When not found in write log, try to read from the storage let (value, gas) = - storage.read(key).map_err(RuntimeError::StorageError)?; + state.db_read(key).map_err(RuntimeError::StorageError)?; add_gas(gas_meter, gas, sentinel)?; Ok(value) } @@ -105,19 +105,17 @@ where /// Storage read posterior state (after tx execution). It will try to read from /// the write log first and if no entry found then from the storage. 
-pub fn read_post( - gas_meter: &mut VpGasMeter, - storage: &State, - write_log: &WriteLog, +pub fn read_post( + gas_meter: &RefCell, + state: &S, key: &Key, - sentinel: &mut VpSentinel, + sentinel: &RefCell, ) -> EnvResult>> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { // Try to read from the write log first - let (log_val, gas) = write_log.read(key); + let (log_val, gas) = state.write_log().read(key); add_gas(gas_meter, gas, sentinel)?; match log_val { Some(write_log::StorageModification::Write { ref value }) => { @@ -139,7 +137,7 @@ where None => { // When not found in write log, try to read from the storage let (value, gas) = - storage.read(key).map_err(RuntimeError::StorageError)?; + state.db_read(key).map_err(RuntimeError::StorageError)?; add_gas(gas_meter, gas, sentinel)?; Ok(value) } @@ -148,14 +146,17 @@ where /// Storage read temporary state (after tx execution). It will try to read from /// only the write log. -pub fn read_temp( - gas_meter: &mut VpGasMeter, - write_log: &WriteLog, +pub fn read_temp( + gas_meter: &RefCell, + state: &S, key: &Key, - sentinel: &mut VpSentinel, -) -> EnvResult>> { + sentinel: &RefCell, +) -> EnvResult>> +where + S: StateRead + Debug, +{ // Try to read from the write log first - let (log_val, gas) = write_log.read(key); + let (log_val, gas) = state.write_log().read(key); add_gas(gas_meter, gas, sentinel)?; match log_val { Some(write_log::StorageModification::Temp { ref value }) => { @@ -168,19 +169,17 @@ pub fn read_temp( /// Storage `has_key` in prior state (before tx execution). It will try to read /// from the storage. 
-pub fn has_key_pre( - gas_meter: &mut VpGasMeter, - storage: &State, - write_log: &WriteLog, +pub fn has_key_pre( + gas_meter: &RefCell, + state: &S, key: &Key, - sentinel: &mut VpSentinel, + sentinel: &RefCell, ) -> EnvResult where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { // Try to read from the write log first - let (log_val, gas) = write_log.read_pre(key); + let (log_val, gas) = state.write_log().read_pre(key); add_gas(gas_meter, gas, sentinel)?; match log_val { Some(&write_log::StorageModification::Write { .. }) => Ok(true), @@ -193,7 +192,7 @@ where None => { // When not found in write log, try to check the storage let (present, gas) = - storage.has_key(key).map_err(RuntimeError::StorageError)?; + state.db_has_key(key).map_err(RuntimeError::StorageError)?; add_gas(gas_meter, gas, sentinel)?; Ok(present) } @@ -202,19 +201,17 @@ where /// Storage `has_key` in posterior state (after tx execution). It will try to /// check the write log first and if no entry found then the storage. -pub fn has_key_post( - gas_meter: &mut VpGasMeter, - storage: &State, - write_log: &WriteLog, +pub fn has_key_post( + gas_meter: &RefCell, + state: &S, key: &Key, - sentinel: &mut VpSentinel, + sentinel: &RefCell, ) -> EnvResult where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { // Try to read from the write log first - let (log_val, gas) = write_log.read(key); + let (log_val, gas) = state.write_log().read(key); add_gas(gas_meter, gas, sentinel)?; match log_val { Some(&write_log::StorageModification::Write { .. 
}) => Ok(true), @@ -227,7 +224,7 @@ where None => { // When not found in write log, try to check the storage let (present, gas) = - storage.has_key(key).map_err(RuntimeError::StorageError)?; + state.db_has_key(key).map_err(RuntimeError::StorageError)?; add_gas(gas_meter, gas, sentinel)?; Ok(present) } @@ -235,49 +232,45 @@ where } /// Getting the chain ID. -pub fn get_chain_id( - gas_meter: &mut VpGasMeter, - storage: &State, - sentinel: &mut VpSentinel, +pub fn get_chain_id( + gas_meter: &RefCell, + state: &S, + sentinel: &RefCell, ) -> EnvResult where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { - let (chain_id, gas) = storage.get_chain_id(); + let (chain_id, gas) = state.in_mem().get_chain_id(); add_gas(gas_meter, gas, sentinel)?; Ok(chain_id) } /// Getting the block height. The height is that of the block to which the /// current transaction is being applied. -pub fn get_block_height( - gas_meter: &mut VpGasMeter, - storage: &State, - sentinel: &mut VpSentinel, +pub fn get_block_height( + gas_meter: &RefCell, + state: &S, + sentinel: &RefCell, ) -> EnvResult where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { - let (height, gas) = storage.get_block_height(); + let (height, gas) = state.in_mem().get_block_height(); add_gas(gas_meter, gas, sentinel)?; Ok(height) } /// Getting the block header. 
-pub fn get_block_header( - gas_meter: &mut VpGasMeter, - storage: &State, +pub fn get_block_header( + gas_meter: &RefCell, + state: &S, height: BlockHeight, - sentinel: &mut VpSentinel, + sentinel: &RefCell, ) -> EnvResult> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { - let (header, gas) = storage - .get_block_header(Some(height)) + let (header, gas) = StateRead::get_block_header(state, Some(height)) .map_err(RuntimeError::StorageError)?; add_gas(gas_meter, gas, sentinel)?; Ok(header) @@ -285,16 +278,15 @@ where /// Getting the block hash. The height is that of the block to which the /// current transaction is being applied. -pub fn get_block_hash( - gas_meter: &mut VpGasMeter, - storage: &State, - sentinel: &mut VpSentinel, +pub fn get_block_hash( + gas_meter: &RefCell, + state: &S, + sentinel: &RefCell, ) -> EnvResult where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { - let (hash, gas) = storage.get_block_hash(); + let (hash, gas) = state.in_mem().get_block_hash(); add_gas(gas_meter, gas, sentinel)?; Ok(hash) } @@ -302,9 +294,9 @@ where /// Getting the block hash. The height is that of the block to which the /// current transaction is being applied. pub fn get_tx_code_hash( - gas_meter: &mut VpGasMeter, + gas_meter: &RefCell, tx: &Tx, - sentinel: &mut VpSentinel, + sentinel: &RefCell, ) -> EnvResult> { add_gas( gas_meter, @@ -320,16 +312,15 @@ pub fn get_tx_code_hash( /// Getting the block epoch. The epoch is that of the block to which the /// current transaction is being applied. 
-pub fn get_block_epoch( - gas_meter: &mut VpGasMeter, - storage: &State, - sentinel: &mut VpSentinel, +pub fn get_block_epoch( + gas_meter: &RefCell, + state: &S, + sentinel: &RefCell, ) -> EnvResult where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { - let (epoch, gas) = storage.get_current_epoch(); + let (epoch, gas) = state.in_mem().get_current_epoch(); add_gas(gas_meter, gas, sentinel)?; Ok(epoch) } @@ -337,9 +328,9 @@ where /// Getting the block epoch. The epoch is that of the block to which the /// current transaction is being applied. pub fn get_tx_index( - gas_meter: &mut VpGasMeter, + gas_meter: &RefCell, tx_index: &TxIndex, - sentinel: &mut VpSentinel, + sentinel: &RefCell, ) -> EnvResult { add_gas( gas_meter, @@ -350,50 +341,52 @@ pub fn get_tx_index( } /// Getting the native token's address. -pub fn get_native_token( - gas_meter: &mut VpGasMeter, - storage: &State, - sentinel: &mut VpSentinel, +pub fn get_native_token( + gas_meter: &RefCell, + state: &S, + sentinel: &RefCell, ) -> EnvResult
where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { add_gas( gas_meter, ESTABLISHED_ADDRESS_BYTES_LEN as u64 * MEMORY_ACCESS_GAS_PER_BYTE, sentinel, )?; - Ok(storage.native_token.clone()) + Ok(state.in_mem().native_token.clone()) } /// Given the information about predecessor block epochs -pub fn get_pred_epochs( - gas_meter: &mut VpGasMeter, - storage: &State, - sentinel: &mut VpSentinel, +pub fn get_pred_epochs( + gas_meter: &RefCell, + state: &S, + sentinel: &RefCell, ) -> EnvResult where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { add_gas( gas_meter, - storage.block.pred_epochs.first_block_heights.len() as u64 + state.in_mem().block.pred_epochs.first_block_heights.len() as u64 * 8 * MEMORY_ACCESS_GAS_PER_BYTE, sentinel, )?; - Ok(storage.block.pred_epochs.clone()) + Ok(state.in_mem().block.pred_epochs.clone()) } /// Getting the IBC event. -pub fn get_ibc_events( - _gas_meter: &mut VpGasMeter, - write_log: &WriteLog, +pub fn get_ibc_events( + _gas_meter: &RefCell, + state: &S, event_type: String, -) -> EnvResult> { - Ok(write_log +) -> EnvResult> +where + S: StateRead + Debug, +{ + Ok(state + .write_log() .get_ibc_events() .iter() .filter(|event| event.event_type == event_type) @@ -403,46 +396,49 @@ pub fn get_ibc_events( /// Storage prefix iterator for prior state (before tx execution), ordered by /// storage keys. It will try to get an iterator from the storage. -pub fn iter_prefix_pre<'a, DB, H>( - gas_meter: &mut VpGasMeter, +pub fn iter_prefix_pre<'a, D>( + gas_meter: &RefCell, + // We cannot use e.g. `&'a State`, because it doesn't live long + // enough - the lifetime of the `PrefixIter` must depend on the lifetime of + // references to the `WriteLog` and `DB`. 
write_log: &'a WriteLog, - storage: &'a State, + db: &'a D, prefix: &Key, - sentinel: &mut VpSentinel, -) -> EnvResult> + sentinel: &RefCell, +) -> EnvResult> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: DB + for<'iter> DBIter<'iter>, { - let (iter, gas) = namada_state::iter_prefix_pre(write_log, storage, prefix); + let (iter, gas) = namada_state::iter_prefix_pre(write_log, db, prefix); add_gas(gas_meter, gas, sentinel)?; Ok(iter) } /// Storage prefix iterator for posterior state (after tx execution), ordered by /// storage keys. It will try to get an iterator from the storage. -pub fn iter_prefix_post<'a, DB, H>( - gas_meter: &mut VpGasMeter, +pub fn iter_prefix_post<'a, D>( + gas_meter: &RefCell, + // We cannot use e.g. `&'a State`, because it doesn't live long + // enough - the lifetime of the `PrefixIter` must depend on the lifetime of + // references to the `WriteLog` and `DB`. write_log: &'a WriteLog, - storage: &'a State, + db: &'a D, prefix: &Key, - sentinel: &mut VpSentinel, -) -> EnvResult> + sentinel: &RefCell, +) -> EnvResult> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: DB + for<'iter> DBIter<'iter>, { - let (iter, gas) = - namada_state::iter_prefix_post(write_log, storage, prefix); + let (iter, gas) = namada_state::iter_prefix_post(write_log, db, prefix); add_gas(gas_meter, gas, sentinel)?; Ok(iter) } /// Get the next item in a storage prefix iterator (pre or post). 
pub fn iter_next( - gas_meter: &mut VpGasMeter, + gas_meter: &RefCell, iter: &mut namada_state::PrefixIter, - sentinel: &mut VpSentinel, + sentinel: &RefCell, ) -> EnvResult)>> where DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, diff --git a/crates/namada/src/lib.rs b/crates/namada/src/lib.rs index a01dba6ebb..47f2db3691 100644 --- a/crates/namada/src/lib.rs +++ b/crates/namada/src/lib.rs @@ -6,7 +6,13 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] -pub use namada_core::{tendermint, tendermint_proto}; +pub use namada_core::{ + address, chain, dec, decode, encode, eth_abi, eth_bridge_pool, + ethereum_events, ethereum_structs, hash, internal, keccak, key, masp, + storage, string_encoding, tendermint, tendermint_proto, time, uint, + validity_predicate, voting_power, +}; +pub use namada_sdk::{control_flow, io}; #[cfg(feature = "tendermint-rpc")] pub use tendermint_rpc; pub use { @@ -14,19 +20,19 @@ pub use { namada_ethereum_bridge as ethereum_bridge, namada_gas as gas, namada_governance as governance, namada_ibc as ibc, namada_parameters as parameters, namada_proof_of_stake as proof_of_stake, - namada_sdk as sdk, namada_state as state, namada_token as token, - namada_tx as tx, namada_vote_ext as vote_ext, + namada_replay_protection as replay_protection, namada_sdk as sdk, + namada_state as state, namada_token as token, namada_tx as tx, + namada_vote_ext as vote_ext, }; pub mod ledger; pub use namada_tx::proto; -pub mod types; pub mod vm; pub mod eth_bridge { //! Namada Ethereum bridge re-exports. pub use ethers; - pub use namada_core::types::ethereum_structs as structs; + pub use namada_core::ethereum_structs as structs; pub use namada_ethereum_bridge::*; } diff --git a/crates/namada/src/types/ibc/mod.rs b/crates/namada/src/types/ibc/mod.rs deleted file mode 100644 index 3c57da5203..0000000000 --- a/crates/namada/src/types/ibc/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! Types that are used in IBC. 
- -pub use namada_core::types::ibc::*; diff --git a/crates/namada/src/types/key/mod.rs b/crates/namada/src/types/key/mod.rs deleted file mode 100644 index 11a7af5533..0000000000 --- a/crates/namada/src/types/key/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! Cryptographic keys - -pub use namada_core::types::key::*; diff --git a/crates/namada/src/types/mod.rs b/crates/namada/src/types/mod.rs deleted file mode 100644 index 3a68fbc779..0000000000 --- a/crates/namada/src/types/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! Types definitions. - -pub use namada_sdk::control_flow; -pub mod ibc; -pub use namada_sdk::io; -pub mod key; - -pub use namada_core::types::{ - address, chain, dec, decode, encode, eth_abi, eth_bridge_pool, - ethereum_events, ethereum_structs, hash, internal, keccak, masp, storage, - string_encoding, time, token, uint, validity_predicate, voting_power, -}; diff --git a/crates/namada/src/vm/host_env.rs b/crates/namada/src/vm/host_env.rs index 34602ce477..b15aa6b553 100644 --- a/crates/namada/src/vm/host_env.rs +++ b/crates/namada/src/vm/host_env.rs @@ -1,22 +1,26 @@ //! Virtual machine's host environment exposes functions that may be called from //! within a virtual machine. 
+use std::cell::RefCell; use std::collections::BTreeSet; -use std::convert::TryInto; +use std::fmt::Debug; use std::num::TryFromIntError; use borsh::BorshDeserialize; use borsh_ext::BorshSerializeExt; use masp_primitives::transaction::Transaction; -use namada_core::types::address::ESTABLISHED_ADDRESS_BYTES_LEN; -use namada_core::types::internal::KeyVal; -use namada_core::types::storage::{Epochs, TX_INDEX_LENGTH}; -use namada_core::types::validity_predicate::VpSentinel; +use namada_core::address::ESTABLISHED_ADDRESS_BYTES_LEN; +use namada_core::internal::KeyVal; +use namada_core::storage::TX_INDEX_LENGTH; +use namada_core::validity_predicate::VpSentinel; use namada_gas::{ self as gas, GasMetering, TxGasMeter, VpGasMeter, MEMORY_ACCESS_GAS_PER_BYTE, }; use namada_state::write_log::{self, WriteLog}; -use namada_state::{self, ResultExt, State, StorageError, StorageHasher}; +use namada_state::{ + DBIter, InMemory, State, StateRead, StorageError, StorageHasher, + StorageRead, StorageWrite, TxHostEnvState, VpHostEnvState, DB, +}; use namada_token::storage_key::is_any_token_parameter_key; use namada_tx::data::TxSentinel; use namada_tx::Tx; @@ -27,16 +31,15 @@ use super::wasm::TxCache; #[cfg(feature = "wasm-runtime")] use super::wasm::VpCache; use super::WasmCacheAccess; +use crate::address::{self, Address}; +use crate::hash::Hash; +use crate::ibc::IbcEvent; +use crate::internal::HostEnvResult; use crate::ledger::vp_host_fns; +use crate::storage::{BlockHeight, Key, TxIndex}; use crate::token::storage_key::{ - balance_key, is_any_minted_balance_key, is_any_minter_key, - is_any_token_balance_key, minted_balance_key, minter_key, + is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, }; -use crate::types::address::{self, Address}; -use crate::types::hash::Hash; -use crate::types::ibc::IbcEvent; -use crate::types::internal::HostEnvResult; -use crate::types::storage::{BlockHeight, Epoch, Key, TxIndex}; use crate::vm::memory::VmMemory; use 
crate::vm::prefix_iter::{PrefixIteratorId, PrefixIterators}; use crate::vm::{HostRef, MutHostRef}; @@ -63,7 +66,7 @@ pub enum TxRuntimeError { #[error("Storage error: {0}")] StorageError(#[from] StorageError), #[error("Storage data error: {0}")] - StorageDataError(crate::types::storage::Error), + StorageDataError(crate::storage::Error), #[error("Encoding error: {0}")] EncodingError(std::io::Error), #[error("Address error: {0}")] @@ -86,37 +89,39 @@ pub enum TxRuntimeError { pub type TxResult = std::result::Result; /// A transaction's host environment -pub struct TxVmEnv<'a, MEM, DB, H, CA> +pub struct TxVmEnv<'a, MEM, D, H, CA> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { /// The VM memory for bi-directional data passing pub memory: MEM, /// The tx context contains references to host structures. - pub ctx: TxCtx<'a, DB, H, CA>, + pub ctx: TxCtx<'a, D, H, CA>, } /// A transaction's host context #[derive(Debug)] -pub struct TxCtx<'a, DB, H, CA> +pub struct TxCtx<'a, D, H, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { - /// Read-only access to the storage. - pub storage: HostRef<'a, &'a State>, - /// Read/write access to the write log. + /// Mutable access to write log. pub write_log: MutHostRef<'a, &'a WriteLog>, + /// Read-only access to in-memory state. + pub in_mem: HostRef<'a, &'a InMemory>, + /// Read-only access to DB. + pub db: HostRef<'a, &'a D>, /// Storage prefix iterators. - pub iterators: MutHostRef<'a, &'a PrefixIterators<'a, DB>>, - /// Transaction gas meter. - pub gas_meter: MutHostRef<'a, &'a TxGasMeter>, - /// Transaction sentinel - pub sentinel: MutHostRef<'a, &'a TxSentinel>, + pub iterators: MutHostRef<'a, &'a PrefixIterators<'a, D>>, + /// Transaction gas meter. In `RefCell` to charge gas in read-only fns. 
+ pub gas_meter: HostRef<'a, &'a RefCell>, + /// Transaction sentinel. In `RefCell` to charge gas in read-only fns. + pub sentinel: HostRef<'a, &'a RefCell>, /// The transaction code is used for signature verification pub tx: HostRef<'a, &'a Tx>, /// The transaction index is used to identify a shielded transaction's @@ -138,10 +143,10 @@ where pub cache_access: std::marker::PhantomData, } -impl<'a, MEM, DB, H, CA> TxVmEnv<'a, MEM, DB, H, CA> +impl<'a, MEM, D, H, CA> TxVmEnv<'a, MEM, D, H, CA> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { @@ -155,11 +160,12 @@ where #[allow(clippy::too_many_arguments)] pub fn new( memory: MEM, - storage: &State, write_log: &mut WriteLog, - iterators: &mut PrefixIterators<'a, DB>, - gas_meter: &mut TxGasMeter, - sentinel: &mut TxSentinel, + in_mem: &InMemory, + db: &D, + iterators: &mut PrefixIterators<'a, D>, + gas_meter: &RefCell, + sentinel: &RefCell, tx: &Tx, tx_index: &TxIndex, verifiers: &mut BTreeSet
, @@ -167,11 +173,12 @@ where #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, #[cfg(feature = "wasm-runtime")] tx_wasm_cache: &mut TxCache, ) -> Self { - let storage = unsafe { HostRef::new(storage) }; let write_log = unsafe { MutHostRef::new(write_log) }; + let in_mem = unsafe { HostRef::new(in_mem) }; + let db = unsafe { HostRef::new(db) }; let iterators = unsafe { MutHostRef::new(iterators) }; - let gas_meter = unsafe { MutHostRef::new(gas_meter) }; - let sentinel = unsafe { MutHostRef::new(sentinel) }; + let gas_meter = unsafe { HostRef::new(gas_meter) }; + let sentinel = unsafe { HostRef::new(sentinel) }; let tx = unsafe { HostRef::new(tx) }; let tx_index = unsafe { HostRef::new(tx_index) }; let verifiers = unsafe { MutHostRef::new(verifiers) }; @@ -181,8 +188,9 @@ where #[cfg(feature = "wasm-runtime")] let tx_wasm_cache = unsafe { MutHostRef::new(tx_wasm_cache) }; let ctx = TxCtx { - storage, write_log, + db, + in_mem, iterators, gas_meter, sentinel, @@ -200,12 +208,17 @@ where Self { memory, ctx } } + + /// Access state from within a tx + pub fn state(&self) -> TxHostEnvState { + self.ctx.state() + } } -impl Clone for TxVmEnv<'_, MEM, DB, H, CA> +impl Clone for TxVmEnv<'_, MEM, D, H, CA> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { @@ -217,16 +230,49 @@ where } } -impl<'a, DB, H, CA> Clone for TxCtx<'a, DB, H, CA> +impl<'a, D, H, CA> TxCtx<'a, D, H, CA> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, + CA: WasmCacheAccess, +{ + /// Access state from within a tx + pub fn state(&self) -> TxHostEnvState { + let write_log = unsafe { self.write_log.get() }; + let db = unsafe { self.db.get() }; + let in_mem = unsafe { self.in_mem.get() }; + let gas_meter = unsafe { self.gas_meter.get() }; + let sentinel = unsafe { self.sentinel.get() }; + TxHostEnvState { + write_log, + db, + in_mem, + gas_meter, + sentinel, + } + } + + 
/// Use gas meter and sentinel + pub fn gas_meter_and_sentinel( + &self, + ) -> (&RefCell, &RefCell) { + let gas_meter = unsafe { self.gas_meter.get() }; + let sentinel = unsafe { self.sentinel.get() }; + (gas_meter, sentinel) + } +} + +impl<'a, D, H, CA> Clone for TxCtx<'a, D, H, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { fn clone(&self) -> Self { Self { - storage: self.storage.clone(), write_log: self.write_log.clone(), + db: self.db.clone(), + in_mem: self.in_mem.clone(), iterators: self.iterators.clone(), gas_meter: self.gas_meter.clone(), sentinel: self.sentinel.clone(), @@ -245,10 +291,10 @@ where } /// A validity predicate's host environment -pub struct VpVmEnv<'a, MEM, DB, H, EVAL, CA> +pub struct VpVmEnv<'a, MEM, D, H, EVAL, CA> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, @@ -256,29 +302,31 @@ where /// The VM memory for bi-directional data passing pub memory: MEM, /// The VP context contains references to host structures. - pub ctx: VpCtx<'a, DB, H, EVAL, CA>, + pub ctx: VpCtx<'a, D, H, EVAL, CA>, } /// A validity predicate's host context -pub struct VpCtx<'a, DB, H, EVAL, CA> +pub struct VpCtx<'a, D, H, EVAL, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { /// The address of the account that owns the VP pub address: HostRef<'a, &'a Address>, - /// Read-only access to the storage. - pub storage: HostRef<'a, &'a State>, - /// Read-only access to the write log. + /// Read-only access to write log. pub write_log: HostRef<'a, &'a WriteLog>, + /// Read-only access to in-memory state. + pub in_mem: HostRef<'a, &'a InMemory>, + /// Read-only access to DB. 
+ pub db: HostRef<'a, &'a D>, /// Storage prefix iterators. - pub iterators: MutHostRef<'a, &'a PrefixIterators<'a, DB>>, - /// VP gas meter. - pub gas_meter: MutHostRef<'a, &'a VpGasMeter>, - /// Errors sentinel - pub sentinel: MutHostRef<'a, &'a VpSentinel>, + pub iterators: MutHostRef<'a, &'a PrefixIterators<'a, D>>, + /// VP gas meter. In `RefCell` to charge gas in read-only fns. + pub gas_meter: HostRef<'a, &'a RefCell>, + /// Errors sentinel. In `RefCell` to charge gas in read-only fns. + pub sentinel: HostRef<'a, &'a RefCell>, /// The transaction code is used for signature verification pub tx: HostRef<'a, &'a Tx>, /// The transaction index is used to identify a shielded transaction's @@ -303,8 +351,8 @@ where /// A Validity predicate runner for calls from the [`vp_eval`] function. pub trait VpEvaluator { - /// Storage DB type - type Db: namada_state::DB + for<'iter> namada_state::DBIter<'iter>; + /// DB type + type Db: DB + for<'iter> DBIter<'iter>; /// Storage hasher type type H: StorageHasher; /// Recursive VP evaluator type @@ -325,11 +373,11 @@ pub trait VpEvaluator { ) -> HostEnvResult; } -impl<'a, MEM, DB, H, EVAL, CA> VpVmEnv<'a, MEM, DB, H, EVAL, CA> +impl<'a, MEM, D, H, EVAL, CA> VpVmEnv<'a, MEM, D, H, EVAL, CA> where - MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, + MEM: VmMemory, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -344,13 +392,14 @@ where pub fn new( memory: MEM, address: &Address, - storage: &State, write_log: &WriteLog, - gas_meter: &mut VpGasMeter, - sentinel: &mut VpSentinel, + in_mem: &InMemory, + db: &D, + gas_meter: &RefCell, + sentinel: &RefCell, tx: &Tx, tx_index: &TxIndex, - iterators: &mut PrefixIterators<'a, DB>, + iterators: &mut PrefixIterators<'a, D>, verifiers: &BTreeSet
, result_buffer: &mut Option>, keys_changed: &BTreeSet, @@ -359,8 +408,9 @@ where ) -> Self { let ctx = VpCtx::new( address, - storage, write_log, + in_mem, + db, gas_meter, sentinel, tx, @@ -376,12 +426,17 @@ where Self { memory, ctx } } + + /// Access state from within a VP + pub fn state(&self) -> VpHostEnvState { + self.ctx.state() + } } -impl Clone for VpVmEnv<'_, MEM, DB, H, EVAL, CA> +impl<'a, MEM, D, H, EVAL, CA> Clone for VpVmEnv<'a, MEM, D, H, EVAL, CA> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, @@ -394,9 +449,9 @@ where } } -impl<'a, DB, H, EVAL, CA> VpCtx<'a, DB, H, EVAL, CA> +impl<'a, D, H, EVAL, CA> VpCtx<'a, D, H, EVAL, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, @@ -411,13 +466,14 @@ where #[allow(clippy::too_many_arguments)] pub fn new( address: &Address, - storage: &State, write_log: &WriteLog, - gas_meter: &mut VpGasMeter, - sentinel: &mut VpSentinel, + in_mem: &InMemory, + db: &D, + gas_meter: &RefCell, + sentinel: &RefCell, tx: &Tx, tx_index: &TxIndex, - iterators: &mut PrefixIterators<'a, DB>, + iterators: &mut PrefixIterators<'a, D>, verifiers: &BTreeSet
, result_buffer: &mut Option>, keys_changed: &BTreeSet, @@ -425,13 +481,14 @@ where #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, ) -> Self { let address = unsafe { HostRef::new(address) }; - let storage = unsafe { HostRef::new(storage) }; let write_log = unsafe { HostRef::new(write_log) }; + let db = unsafe { HostRef::new(db) }; + let in_mem = unsafe { HostRef::new(in_mem) }; let tx = unsafe { HostRef::new(tx) }; let tx_index = unsafe { HostRef::new(tx_index) }; let iterators = unsafe { MutHostRef::new(iterators) }; - let gas_meter = unsafe { MutHostRef::new(gas_meter) }; - let sentinel = unsafe { MutHostRef::new(sentinel) }; + let gas_meter = unsafe { HostRef::new(gas_meter) }; + let sentinel = unsafe { HostRef::new(sentinel) }; let verifiers = unsafe { HostRef::new(verifiers) }; let result_buffer = unsafe { MutHostRef::new(result_buffer) }; let keys_changed = unsafe { HostRef::new(keys_changed) }; @@ -440,8 +497,9 @@ where let vp_wasm_cache = unsafe { MutHostRef::new(vp_wasm_cache) }; Self { address, - storage, write_log, + db, + in_mem, iterators, gas_meter, sentinel, @@ -457,11 +515,36 @@ where cache_access: std::marker::PhantomData, } } + + /// Access state from within a VP + pub fn state(&self) -> VpHostEnvState { + let write_log = unsafe { self.write_log.get() }; + let db = unsafe { self.db.get() }; + let in_mem = unsafe { self.in_mem.get() }; + let gas_meter = unsafe { self.gas_meter.get() }; + let sentinel = unsafe { self.sentinel.get() }; + VpHostEnvState { + write_log, + db, + in_mem, + gas_meter, + sentinel, + } + } + + /// Use gas meter and sentinel + pub fn gas_meter_and_sentinel( + &self, + ) -> (&RefCell, &RefCell) { + let gas_meter = unsafe { self.gas_meter.get() }; + let sentinel = unsafe { self.sentinel.get() }; + (gas_meter, sentinel) + } } -impl<'a, DB, H, EVAL, CA> Clone for VpCtx<'a, DB, H, EVAL, CA> +impl<'a, D, H, EVAL, CA> Clone for VpCtx<'a, D, H, EVAL, CA> where - DB: namada_state::DB + for<'iter> 
namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, @@ -469,8 +552,9 @@ where fn clone(&self) -> Self { Self { address: self.address.clone(), - storage: self.storage.clone(), write_log: self.write_log.clone(), + db: self.db.clone(), + in_mem: self.in_mem.clone(), iterators: self.iterators.clone(), gas_meter: self.gas_meter.clone(), sentinel: self.sentinel.clone(), @@ -489,21 +573,20 @@ where } /// Add a gas cost incured in a transaction -pub fn tx_charge_gas( - env: &TxVmEnv, +pub fn tx_charge_gas( + env: &TxVmEnv, used_gas: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); // if we run out of gas, we need to stop the execution - gas_meter.consume(used_gas).map_err(|err| { - let sentinel = unsafe { env.ctx.sentinel.get() }; - sentinel.set_out_of_gas(); + gas_meter.borrow_mut().consume(used_gas).map_err(|err| { + sentinel.borrow_mut().set_out_of_gas(); tracing::info!( "Stopping transaction execution because of gas error: {}", err @@ -514,72 +597,48 @@ where } /// Called from VP wasm to request to use the given gas amount -pub fn vp_charge_gas( - env: &VpVmEnv, +pub fn vp_charge_gas( + env: &VpVmEnv, used_gas: u64, ) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); vp_host_fns::add_gas(gas_meter, used_gas, sentinel) } /// Storage `has_key` 
function exposed to the wasm VM Tx environment. It will /// try to check the write log first and if no entry found then the storage. -pub fn tx_has_key( - env: &TxVmEnv, +pub fn tx_has_key( + env: &TxVmEnv, key_ptr: u64, key_len: u64, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { let (key, gas) = env .memory .read_string(key_ptr, key_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; tracing::debug!("tx_has_key {}, key {}", key, key_ptr,); let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; // try to read from the write log first - let write_log = unsafe { env.ctx.write_log.get() }; - let (log_val, gas) = write_log.read(&key); - tx_charge_gas(env, gas)?; - Ok(match log_val { - Some(&write_log::StorageModification::Write { .. }) => { - HostEnvResult::Success.to_i64() - } - Some(&write_log::StorageModification::Delete) => { - // the given key has been deleted - HostEnvResult::Fail.to_i64() - } - Some(&write_log::StorageModification::InitAccount { .. }) => { - HostEnvResult::Success.to_i64() - } - Some(&write_log::StorageModification::Temp { .. }) => { - HostEnvResult::Success.to_i64() - } - None => { - // when not found in write log, try to check the storage - let storage = unsafe { env.ctx.storage.get() }; - let (present, gas) = - storage.has_key(&key).map_err(TxRuntimeError::StateError)?; - tx_charge_gas(env, gas)?; - HostEnvResult::from(present).to_i64() - } - }) + let state = env.state(); + let present = state.has_key(&key)?; + Ok(HostEnvResult::from(present).to_i64()) } /// Storage read function exposed to the wasm VM Tx environment. It will try to @@ -587,86 +646,41 @@ where /// /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). 
-pub fn tx_read( - env: &TxVmEnv, +pub fn tx_read( + env: &TxVmEnv, key_ptr: u64, key_len: u64, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { let (key, gas) = env .memory .read_string(key_ptr, key_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; tracing::debug!("tx_read {}, key {}", key, key_ptr,); let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; - // try to read from the write log first - let write_log = unsafe { env.ctx.write_log.get() }; - let (log_val, gas) = write_log.read(&key); - tx_charge_gas(env, gas)?; - Ok(match log_val { - Some(write_log::StorageModification::Write { ref value }) => { - let len: i64 = value - .len() - .try_into() - .map_err(TxRuntimeError::NumConversionError)?; - let result_buffer = unsafe { env.ctx.result_buffer.get() }; - result_buffer.replace(value.clone()); - len - } - Some(&write_log::StorageModification::Delete) => { - // fail, given key has been deleted - HostEnvResult::Fail.to_i64() - } - Some(write_log::StorageModification::InitAccount { - ref vp_code_hash, - }) => { - // read the VP of a new account - let len: i64 = vp_code_hash - .len() - .try_into() - .map_err(TxRuntimeError::NumConversionError)?; - let result_buffer = unsafe { env.ctx.result_buffer.get() }; - result_buffer.replace(vp_code_hash.to_vec()); - len - } - Some(write_log::StorageModification::Temp { ref value }) => { + let state = env.state(); + let value = state.read_bytes(&key)?; + match value { + Some(value) => { let len: i64 = value .len() .try_into() .map_err(TxRuntimeError::NumConversionError)?; let result_buffer = unsafe { env.ctx.result_buffer.get() }; - result_buffer.replace(value.clone()); - len - } - None => { - // when not found in write log, try to read from the storage - let storage = 
unsafe { env.ctx.storage.get() }; - let (value, gas) = - storage.read(&key).map_err(TxRuntimeError::StateError)?; - tx_charge_gas(env, gas)?; - match value { - Some(value) => { - let len: i64 = value - .len() - .try_into() - .map_err(TxRuntimeError::NumConversionError)?; - let result_buffer = unsafe { env.ctx.result_buffer.get() }; - result_buffer.replace(value); - len - } - None => HostEnvResult::Fail.to_i64(), - } + result_buffer.replace(value); + Ok(len) } - }) + None => Ok(HostEnvResult::Fail.to_i64()), + } } /// This function is a helper to handle the first step of reading var-len @@ -677,14 +691,14 @@ where /// first step reads the value into a result buffer and returns the size (if /// any) back to the guest, the second step reads the value from cache into a /// pre-allocated buffer with the obtained size. -pub fn tx_result_buffer( - env: &TxVmEnv, +pub fn tx_result_buffer( + env: &TxVmEnv, result_ptr: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { let result_buffer = unsafe { env.ctx.result_buffer.get() }; @@ -695,28 +709,28 @@ where .memory .write_bytes(result_ptr, value) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas) + tx_charge_gas::(env, gas) } /// Storage prefix iterator function exposed to the wasm VM Tx environment. /// It will try to get an iterator from the storage and return the corresponding /// ID of the iterator, ordered by storage keys. 
-pub fn tx_iter_prefix( - env: &TxVmEnv, +pub fn tx_iter_prefix( + env: &TxVmEnv, prefix_ptr: u64, prefix_len: u64, ) -> TxResult where MEM: VmMemory, - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { let (prefix, gas) = env .memory .read_string(prefix_ptr, prefix_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; tracing::debug!("tx_iter_prefix {}", prefix); @@ -724,10 +738,9 @@ where Key::parse(prefix).map_err(TxRuntimeError::StorageDataError)?; let write_log = unsafe { env.ctx.write_log.get() }; - let storage = unsafe { env.ctx.storage.get() }; - let (iter, gas) = - namada_state::iter_prefix_post(write_log, storage, &prefix); - tx_charge_gas(env, gas)?; + let db = unsafe { env.ctx.db.get() }; + let (iter, gas) = namada_state::iter_prefix_post(write_log, db, &prefix); + tx_charge_gas::(env, gas)?; let iterators = unsafe { env.ctx.iterators.get() }; Ok(iterators.insert(iter).id()) @@ -739,27 +752,27 @@ where /// /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). 
-pub fn tx_iter_next( - env: &TxVmEnv, +pub fn tx_iter_next( + env: &TxVmEnv, iter_id: u64, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { tracing::debug!("tx_iter_next iter_id {}", iter_id,); - let write_log = unsafe { env.ctx.write_log.get() }; + let state = env.state(); let iterators = unsafe { env.ctx.iterators.get() }; let iter_id = PrefixIteratorId::new(iter_id); while let Some((key, val, iter_gas)) = iterators.next(iter_id) { - let (log_val, log_gas) = write_log.read( + let (log_val, log_gas) = state.write_log().read( &Key::parse(key.clone()) .map_err(TxRuntimeError::StorageDataError)?, ); - tx_charge_gas(env, iter_gas + log_gas)?; + tx_charge_gas::(env, iter_gas + log_gas)?; match log_val { Some(write_log::StorageModification::Write { ref value }) => { let key_val = borsh::to_vec(&KeyVal { @@ -775,11 +788,11 @@ where result_buffer.replace(key_val); return Ok(len); } - Some(&write_log::StorageModification::Delete) => { + Some(write_log::StorageModification::Delete) => { // check the next because the key has already deleted continue; } - Some(&write_log::StorageModification::InitAccount { .. }) => { + Some(write_log::StorageModification::InitAccount { .. }) => { // a VP of a new account doesn't need to be iterated continue; } @@ -815,8 +828,8 @@ where /// Storage write function exposed to the wasm VM Tx environment. The given /// key/value will be written to the write log. 
-pub fn tx_write( - env: &TxVmEnv, +pub fn tx_write( + env: &TxVmEnv, key_ptr: u64, key_len: u64, val_ptr: u64, @@ -824,42 +837,41 @@ pub fn tx_write( ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { let (key, gas) = env .memory .read_string(key_ptr, key_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let (value, gas) = env .memory .read_bytes(val_ptr, val_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; tracing::debug!("tx_update {}, {:?}", key, value); let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; if key.is_validity_predicate().is_some() { - tx_validate_vp_code_hash(env, &value, &None)?; + tx_validate_vp_code_hash::(env, &value, &None)?; } - check_address_existence(env, &key)?; + check_address_existence::(env, &key)?; - let write_log = unsafe { env.ctx.write_log.get() }; - let (gas, _size_diff) = write_log - .write(&key, value) - .map_err(TxRuntimeError::StorageModificationError)?; - tx_charge_gas(env, gas) + let mut state = env.state(); + state + .write_bytes(&key, value) + .map_err(TxRuntimeError::StorageError) } /// Temporary storage write function exposed to the wasm VM Tx environment. The /// given key/value will be written only to the write log. It will be never /// written to the storage. 
-pub fn tx_write_temp( - env: &TxVmEnv, +pub fn tx_write_temp( + env: &TxVmEnv, key_ptr: u64, key_len: u64, val_ptr: u64, @@ -867,42 +879,43 @@ pub fn tx_write_temp( ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { let (key, gas) = env .memory .read_string(key_ptr, key_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let (value, gas) = env .memory .read_bytes(val_ptr, val_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; tracing::debug!("tx_write_temp {}, {:?}", key, value); let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; - check_address_existence(env, &key)?; + check_address_existence::(env, &key)?; - let write_log = unsafe { env.ctx.write_log.get() }; - let (gas, _size_diff) = write_log + let mut state = env.state(); + let (gas, _size_diff) = state + .write_log_mut() .write_temp(&key, value) .map_err(TxRuntimeError::StorageModificationError)?; - tx_charge_gas(env, gas) + tx_charge_gas::(env, gas) } -fn check_address_existence( - env: &TxVmEnv, +fn check_address_existence( + env: &TxVmEnv, key: &Key, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { // Get the token if the key is a balance or minter key @@ -914,8 +927,7 @@ where is_any_minted_balance_key(key).or_else(|| is_any_minter_key(key)) }; - let write_log = unsafe { env.ctx.write_log.get() }; - let storage = unsafe { env.ctx.storage.get() }; + let state = env.state(); for addr in key.find_addresses() { // skip if the address is a token address if Some(&addr) == token { @@ -926,15 +938,15 @@ where 
continue; } let vp_key = Key::validity_predicate(&addr); - let (vp, gas) = write_log.read(&vp_key); - tx_charge_gas(env, gas)?; + let (vp, gas) = state.write_log().read(&vp_key); + tx_charge_gas::(env, gas)?; // just check the existence because the write log should not have the // delete log of the VP if vp.is_none() { - let (is_present, gas) = storage - .has_key(&vp_key) + let (is_present, gas) = state + .db_has_key(&vp_key) .map_err(TxRuntimeError::StateError)?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; if !is_present { tracing::info!( "Trying to write into storage with a key containing an \ @@ -952,22 +964,22 @@ where /// Storage delete function exposed to the wasm VM Tx environment. The given /// key/value will be written as deleted to the write log. -pub fn tx_delete( - env: &TxVmEnv, +pub fn tx_delete( + env: &TxVmEnv, key_ptr: u64, key_len: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { let (key, gas) = env .memory .read_string(key_ptr, key_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; tracing::debug!("tx_delete {}", key); @@ -976,57 +988,55 @@ where return Err(TxRuntimeError::CannotDeleteVp); } - let write_log = unsafe { env.ctx.write_log.get() }; - let (gas, _size_diff) = write_log - .delete(&key) - .map_err(TxRuntimeError::StorageModificationError)?; - tx_charge_gas(env, gas) + let mut state = env.state(); + state.delete(&key).map_err(TxRuntimeError::StorageError) } /// Emitting an IBC event function exposed to the wasm VM Tx environment. /// The given IBC event will be set to the write log. 
-pub fn tx_emit_ibc_event( - env: &TxVmEnv, +pub fn tx_emit_ibc_event( + env: &TxVmEnv, event_ptr: u64, event_len: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { let (event, gas) = env .memory .read_bytes(event_ptr, event_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let event: IbcEvent = BorshDeserialize::try_from_slice(&event) .map_err(TxRuntimeError::EncodingError)?; - let write_log = unsafe { env.ctx.write_log.get() }; - let gas = write_log.emit_ibc_event(event); - tx_charge_gas(env, gas) + let mut state = env.state(); + let gas = state.write_log_mut().emit_ibc_event(event); + tx_charge_gas::(env, gas) } /// Getting an IBC event function exposed to the wasm VM Tx environment. -pub fn tx_get_ibc_events( - env: &TxVmEnv, +pub fn tx_get_ibc_events( + env: &TxVmEnv, event_type_ptr: u64, event_type_len: u64, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { let (event_type, gas) = env .memory .read_string(event_type_ptr, event_type_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; - let write_log = unsafe { env.ctx.write_log.get() }; - let events: Vec = write_log + tx_charge_gas::(env, gas)?; + let state = env.state(); + let events: Vec = state + .write_log() .get_ibc_events() .iter() .filter(|event| event.event_type == event_type) @@ -1047,15 +1057,15 @@ where /// /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). 
-pub fn vp_read_pre( - env: &VpVmEnv, +pub fn vp_read_pre( + env: &VpVmEnv, key_ptr: u64, key_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1063,17 +1073,14 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); vp_host_fns::add_gas(gas_meter, gas, sentinel)?; // try to read from the storage let key = Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; - let storage = unsafe { env.ctx.storage.get() }; - let write_log = unsafe { env.ctx.write_log.get() }; - let value = - vp_host_fns::read_pre(gas_meter, storage, write_log, &key, sentinel)?; + let state = env.state(); + let value = vp_host_fns::read_pre(gas_meter, &state, &key, sentinel)?; tracing::debug!( "vp_read_pre addr {}, key {}, value {:?}", unsafe { env.ctx.address.get() }, @@ -1100,15 +1107,15 @@ where /// /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). 
-pub fn vp_read_post( - env: &VpVmEnv, +pub fn vp_read_post( + env: &VpVmEnv, key_ptr: u64, key_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1116,8 +1123,7 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); vp_host_fns::add_gas(gas_meter, gas, sentinel)?; tracing::debug!("vp_read_post {}, key {}", key, key_ptr,); @@ -1125,10 +1131,8 @@ where // try to read from the write log first let key = Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; - let storage = unsafe { env.ctx.storage.get() }; - let write_log = unsafe { env.ctx.write_log.get() }; - let value = - vp_host_fns::read_post(gas_meter, storage, write_log, &key, sentinel)?; + let state = env.state(); + let value = vp_host_fns::read_post(gas_meter, &state, &key, sentinel)?; Ok(match value { Some(value) => { let len: i64 = value @@ -1148,15 +1152,15 @@ where /// /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). 
-pub fn vp_read_temp( - env: &VpVmEnv, +pub fn vp_read_temp( + env: &VpVmEnv, key_ptr: u64, key_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1164,8 +1168,7 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); vp_host_fns::add_gas(gas_meter, gas, sentinel)?; tracing::debug!("vp_read_temp {}, key {}", key, key_ptr); @@ -1173,8 +1176,8 @@ where // try to read from the write log let key = Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; - let write_log = unsafe { env.ctx.write_log.get() }; - let value = vp_host_fns::read_temp(gas_meter, write_log, &key, sentinel)?; + let state = env.state(); + let value = vp_host_fns::read_temp(gas_meter, &state, &key, sentinel)?; Ok(match value { Some(value) => { let len: i64 = value @@ -1197,14 +1200,14 @@ where /// first step reads the value into a result buffer and returns the size (if /// any) back to the guest, the second step reads the value from cache into a /// pre-allocated buffer with the obtained size. 
-pub fn vp_result_buffer( - env: &VpVmEnv, +pub fn vp_result_buffer( + env: &VpVmEnv, result_ptr: u64, ) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1216,22 +1219,21 @@ where .memory .write_bytes(result_ptr, value) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); vp_host_fns::add_gas(gas_meter, gas, sentinel) } /// Storage `has_key` in prior state (before tx execution) function exposed to /// the wasm VM VP environment. It will try to read from the storage. -pub fn vp_has_key_pre( - env: &VpVmEnv, +pub fn vp_has_key_pre( + env: &VpVmEnv, key_ptr: u64, key_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1239,34 +1241,30 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); vp_host_fns::add_gas(gas_meter, gas, sentinel)?; tracing::debug!("vp_has_key_pre {}, key {}", key, key_ptr,); let key = Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; - let storage = unsafe { env.ctx.storage.get() }; - let write_log = unsafe { env.ctx.write_log.get() }; - let present = vp_host_fns::has_key_pre( - gas_meter, storage, write_log, &key, sentinel, - )?; + let state = env.state(); + let present = 
vp_host_fns::has_key_pre(gas_meter, &state, &key, sentinel)?; Ok(HostEnvResult::from(present).to_i64()) } /// Storage `has_key` in posterior state (after tx execution) function exposed /// to the wasm VM VP environment. It will try to check the write log first and /// if no entry found then the storage. -pub fn vp_has_key_post( - env: &VpVmEnv, +pub fn vp_has_key_post( + env: &VpVmEnv, key_ptr: u64, key_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1274,19 +1272,15 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); vp_host_fns::add_gas(gas_meter, gas, sentinel)?; tracing::debug!("vp_has_key_post {}, key {}", key, key_ptr,); let key = Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; - let storage = unsafe { env.ctx.storage.get() }; - let write_log = unsafe { env.ctx.write_log.get() }; - let present = vp_host_fns::has_key_post( - gas_meter, storage, write_log, &key, sentinel, - )?; + let state = env.state(); + let present = vp_host_fns::has_key_post(gas_meter, &state, &key, sentinel)?; Ok(HostEnvResult::from(present).to_i64()) } @@ -1294,15 +1288,15 @@ where /// exposed to the wasm VM VP environment. It will try to get an iterator from /// the storage and return the corresponding ID of the iterator, ordered by /// storage keys. 
-pub fn vp_iter_prefix_pre( - env: &VpVmEnv, +pub fn vp_iter_prefix_pre( + env: &VpVmEnv, prefix_ptr: u64, prefix_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1310,8 +1304,7 @@ where .memory .read_string(prefix_ptr, prefix_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); vp_host_fns::add_gas(gas_meter, gas, sentinel)?; tracing::debug!("vp_iter_prefix_pre {}", prefix); @@ -1320,9 +1313,9 @@ where .map_err(vp_host_fns::RuntimeError::StorageDataError)?; let write_log = unsafe { env.ctx.write_log.get() }; - let storage = unsafe { env.ctx.storage.get() }; + let db = unsafe { env.ctx.db.get() }; let iter = vp_host_fns::iter_prefix_pre( - gas_meter, write_log, storage, &prefix, sentinel, + gas_meter, write_log, db, &prefix, sentinel, )?; let iterators = unsafe { env.ctx.iterators.get() }; @@ -1333,15 +1326,15 @@ where /// exposed to the wasm VM VP environment. It will try to get an iterator from /// the storage and return the corresponding ID of the iterator, ordered by /// storage keys. 
-pub fn vp_iter_prefix_post( - env: &VpVmEnv, +pub fn vp_iter_prefix_post( + env: &VpVmEnv, prefix_ptr: u64, prefix_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1349,8 +1342,7 @@ where .memory .read_string(prefix_ptr, prefix_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); vp_host_fns::add_gas(gas_meter, gas, sentinel)?; tracing::debug!("vp_iter_prefix_post {}", prefix); @@ -1359,9 +1351,9 @@ where .map_err(vp_host_fns::RuntimeError::StorageDataError)?; let write_log = unsafe { env.ctx.write_log.get() }; - let storage = unsafe { env.ctx.storage.get() }; + let db = unsafe { env.ctx.db.get() }; let iter = vp_host_fns::iter_prefix_post( - gas_meter, write_log, storage, &prefix, sentinel, + gas_meter, write_log, db, &prefix, sentinel, )?; let iterators = unsafe { env.ctx.iterators.get() }; @@ -1373,14 +1365,14 @@ where /// /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). 
-pub fn vp_iter_next( - env: &VpVmEnv, +pub fn vp_iter_next( + env: &VpVmEnv, iter_id: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1389,8 +1381,7 @@ where let iterators = unsafe { env.ctx.iterators.get() }; let iter_id = PrefixIteratorId::new(iter_id); if let Some(iter) = iterators.get_mut(iter_id) { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); if let Some((key, val)) = vp_host_fns::iter_next(gas_meter, iter, sentinel)? { @@ -1409,22 +1400,22 @@ where } /// Verifier insertion function exposed to the wasm VM Tx environment. -pub fn tx_insert_verifier( - env: &TxVmEnv, +pub fn tx_insert_verifier( + env: &TxVmEnv, addr_ptr: u64, addr_len: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { let (addr, gas) = env .memory .read_string(addr_ptr, addr_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; tracing::debug!("tx_insert_verifier {}, addr_ptr {}", addr, addr_ptr,); @@ -1433,100 +1424,104 @@ where let verifiers = unsafe { env.ctx.verifiers.get() }; // This is not a storage write, use the same multiplier used for a storage // read - tx_charge_gas(env, addr_len * MEMORY_ACCESS_GAS_PER_BYTE)?; + tx_charge_gas::(env, addr_len * MEMORY_ACCESS_GAS_PER_BYTE)?; verifiers.insert(addr); Ok(()) } /// Initialize a new account established address. 
-pub fn tx_init_account( - env: &TxVmEnv, +pub fn tx_init_account( + env: &TxVmEnv, result_ptr: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { tracing::debug!("tx_init_account"); - let storage = unsafe { env.ctx.storage.get() }; - let write_log = unsafe { env.ctx.write_log.get() }; + let mut state = env.state(); let hash_key = Key::wasm_hash("vp_user.wasm"); - let (vp_hash, gas) = storage - .read(&hash_key) + let (vp_hash, gas) = state + .db_read(&hash_key) .map_err(TxRuntimeError::StateError)?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; + let (write_log, in_mem, _db) = state.split_borrow(); + let gen = &in_mem.address_gen; let code_hash = Hash::try_from( &vp_hash.ok_or(TxRuntimeError::StorageError( StorageError::SimpleMessage("Missing hash of vp_user in storage"), ))?[..], ) .map_err(|e| TxRuntimeError::InvalidVpCodeHash(e.to_string()))?; - let (addr, gas) = write_log.init_account(&storage.address_gen, code_hash); + let (addr, gas) = write_log.init_account(gen, code_hash); let addr_bytes = addr.serialize_to_vec(); - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let gas = env .memory .write_bytes(result_ptr, addr_bytes) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas) + tx_charge_gas::(env, gas) } /// Getting the chain ID function exposed to the wasm VM Tx environment. 
-pub fn tx_get_chain_id( - env: &TxVmEnv, +pub fn tx_get_chain_id( + env: &TxVmEnv, result_ptr: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { - let storage = unsafe { env.ctx.storage.get() }; - let (chain_id, gas) = storage.get_chain_id(); - tx_charge_gas(env, gas)?; + let state = env.state(); + let (chain_id, gas) = state.in_mem().get_chain_id(); + tx_charge_gas::(env, gas)?; let gas = env .memory .write_string(result_ptr, chain_id) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas) + tx_charge_gas::(env, gas) } /// Getting the block height function exposed to the wasm VM Tx /// environment. The height is that of the block to which the current /// transaction is being applied. -pub fn tx_get_block_height( - env: &TxVmEnv, +pub fn tx_get_block_height( + env: &TxVmEnv, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { - let storage = unsafe { env.ctx.storage.get() }; - let (height, gas) = storage.get_block_height(); - tx_charge_gas(env, gas)?; + let state = env.state(); + let (height, gas) = state.in_mem().get_block_height(); + tx_charge_gas::(env, gas)?; Ok(height.0) } /// Getting the transaction index function exposed to the wasm VM Tx /// environment. The index is that of the transaction being applied /// in the current block. 
-pub fn tx_get_tx_index( - env: &TxVmEnv, +pub fn tx_get_tx_index( + env: &TxVmEnv, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { - tx_charge_gas(env, TX_INDEX_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE)?; + tx_charge_gas::( + env, + TX_INDEX_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, + )?; let tx_index = unsafe { env.ctx.tx_index.get() }; Ok(tx_index.0) } @@ -1534,18 +1529,17 @@ where /// Getting the block height function exposed to the wasm VM VP /// environment. The height is that of the block to which the current /// transaction is being applied. -pub fn vp_get_tx_index( - env: &VpVmEnv, +pub fn vp_get_tx_index( + env: &VpVmEnv, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); let tx_index = unsafe { env.ctx.tx_index.get() }; let tx_idx = vp_host_fns::get_tx_index(gas_meter, tx_index, sentinel)?; Ok(tx_idx.0) @@ -1553,109 +1547,113 @@ where /// Getting the block hash function exposed to the wasm VM Tx environment. The /// hash is that of the block to which the current transaction is being applied. 
-pub fn tx_get_block_hash( - env: &TxVmEnv, +pub fn tx_get_block_hash( + env: &TxVmEnv, result_ptr: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { - let storage = unsafe { env.ctx.storage.get() }; - let (hash, gas) = storage.get_block_hash(); - tx_charge_gas(env, gas)?; + let state = env.state(); + let (hash, gas) = state.in_mem().get_block_hash(); + tx_charge_gas::(env, gas)?; let gas = env .memory .write_bytes(result_ptr, hash.0) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas) + tx_charge_gas::(env, gas) } /// Getting the block epoch function exposed to the wasm VM Tx /// environment. The epoch is that of the block to which the current /// transaction is being applied. -pub fn tx_get_block_epoch( - env: &TxVmEnv, +pub fn tx_get_block_epoch( + env: &TxVmEnv, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { - let storage = unsafe { env.ctx.storage.get() }; - let (epoch, gas) = storage.get_current_epoch(); - tx_charge_gas(env, gas)?; + let state = env.state(); + let (epoch, gas) = state.in_mem().get_current_epoch(); + tx_charge_gas::(env, gas)?; Ok(epoch.0) } /// Get predecessor epochs function exposed to the wasm VM Tx environment. 
-pub fn tx_get_pred_epochs( - env: &TxVmEnv, +pub fn tx_get_pred_epochs( + env: &TxVmEnv, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { - let storage = unsafe { env.ctx.storage.get() }; - let pred_epochs = storage.block.pred_epochs.clone(); + let state = env.state(); + let pred_epochs = state.in_mem().block.pred_epochs.clone(); let bytes = pred_epochs.serialize_to_vec(); let len: i64 = bytes .len() .try_into() .map_err(TxRuntimeError::NumConversionError)?; - tx_charge_gas(env, MEMORY_ACCESS_GAS_PER_BYTE * len as u64)?; + tx_charge_gas::( + env, + MEMORY_ACCESS_GAS_PER_BYTE * len as u64, + )?; let result_buffer = unsafe { env.ctx.result_buffer.get() }; result_buffer.replace(bytes); Ok(len) } /// Get the native token's address -pub fn tx_get_native_token( - env: &TxVmEnv, +pub fn tx_get_native_token( + env: &TxVmEnv, result_ptr: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { // Gas for getting the native token address from storage - tx_charge_gas( + tx_charge_gas::( env, ESTABLISHED_ADDRESS_BYTES_LEN as u64 * MEMORY_ACCESS_GAS_PER_BYTE, )?; - let storage = unsafe { env.ctx.storage.get() }; - let native_token = storage.native_token.clone(); + let state = env.state(); + let native_token = state.in_mem().native_token.clone(); let native_token_string = native_token.encode(); let gas = env .memory .write_string(result_ptr, native_token_string) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas) + tx_charge_gas::(env, gas) } /// Getting the block header function exposed to the wasm VM Tx environment. 
-pub fn tx_get_block_header( - env: &TxVmEnv, +pub fn tx_get_block_header( + env: &TxVmEnv, height: u64, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { - let storage = unsafe { env.ctx.storage.get() }; - let (header, gas) = storage - .get_block_header(Some(BlockHeight(height))) - .map_err(TxRuntimeError::StateError)?; - tx_charge_gas(env, gas)?; + let state = env.state(); + let (header, gas) = + StateRead::get_block_header(&state, Some(BlockHeight(height))) + .map_err(TxRuntimeError::StateError)?; + + tx_charge_gas::(env, gas)?; Ok(match header { Some(h) => { let value = h.serialize_to_vec(); @@ -1672,21 +1670,20 @@ where } /// Getting the chain ID function exposed to the wasm VM VP environment. -pub fn vp_get_chain_id( - env: &VpVmEnv, +pub fn vp_get_chain_id( + env: &VpVmEnv, result_ptr: u64, ) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let storage = unsafe { env.ctx.storage.get() }; - let chain_id = vp_host_fns::get_chain_id(gas_meter, storage, sentinel)?; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let state = env.state(); + let chain_id = vp_host_fns::get_chain_id(gas_meter, &state, sentinel)?; let gas = env .memory .write_string(result_ptr, chain_id) @@ -1697,41 +1694,39 @@ where /// Getting the block height function exposed to the wasm VM VP /// environment. The height is that of the block to which the current /// transaction is being applied. 
-pub fn vp_get_block_height( - env: &VpVmEnv, +pub fn vp_get_block_height( + env: &VpVmEnv, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let storage = unsafe { env.ctx.storage.get() }; - let height = vp_host_fns::get_block_height(gas_meter, storage, sentinel)?; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let state = env.state(); + let height = vp_host_fns::get_block_height(gas_meter, &state, sentinel)?; Ok(height.0) } /// Getting the block header function exposed to the wasm VM VP environment. -pub fn vp_get_block_header( - env: &VpVmEnv, +pub fn vp_get_block_header( + env: &VpVmEnv, height: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let storage = unsafe { env.ctx.storage.get() }; - let (header, gas) = storage - .get_block_header(Some(BlockHeight(height))) - .map_err(vp_host_fns::RuntimeError::StorageError)?; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let state = env.state(); + let (header, gas) = + StateRead::get_block_header(&state, Some(BlockHeight(height))) + .map_err(vp_host_fns::RuntimeError::StorageError)?; vp_host_fns::add_gas(gas_meter, gas, sentinel)?; Ok(match header { Some(h) => { @@ -1750,21 +1745,20 @@ where /// Getting the block hash function exposed to the wasm VM VP environment. The /// hash is that of the block to which the current transaction is being applied. 
-pub fn vp_get_block_hash( - env: &VpVmEnv, +pub fn vp_get_block_hash( + env: &VpVmEnv, result_ptr: u64, ) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let storage = unsafe { env.ctx.storage.get() }; - let hash = vp_host_fns::get_block_hash(gas_meter, storage, sentinel)?; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let state = env.state(); + let hash = vp_host_fns::get_block_hash(gas_meter, &state, sentinel)?; let gas = env .memory .write_bytes(result_ptr, hash.0) @@ -1773,19 +1767,18 @@ where } /// Getting the transaction hash function exposed to the wasm VM VP environment. -pub fn vp_get_tx_code_hash( - env: &VpVmEnv, +pub fn vp_get_tx_code_hash( + env: &VpVmEnv, result_ptr: u64, ) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); let tx = unsafe { env.ctx.tx.get() }; let hash = vp_host_fns::get_tx_code_hash(gas_meter, tx, sentinel)?; let mut result_bytes = vec![]; @@ -1805,39 +1798,37 @@ where /// Getting the block epoch function exposed to the wasm VM VP /// environment. The epoch is that of the block to which the current /// transaction is being applied. 
-pub fn vp_get_block_epoch( - env: &VpVmEnv, +pub fn vp_get_block_epoch( + env: &VpVmEnv, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let storage = unsafe { env.ctx.storage.get() }; - let epoch = vp_host_fns::get_block_epoch(gas_meter, storage, sentinel)?; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let state = env.state(); + let epoch = vp_host_fns::get_block_epoch(gas_meter, &state, sentinel)?; Ok(epoch.0) } /// Get predecessor epochs function exposed to the wasm VM VP environment. -pub fn vp_get_pred_epochs( - env: &VpVmEnv, +pub fn vp_get_pred_epochs( + env: &VpVmEnv, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let storage = unsafe { env.ctx.storage.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let state = env.state(); let pred_epochs = - vp_host_fns::get_pred_epochs(gas_meter, storage, sentinel)?; + vp_host_fns::get_pred_epochs(gas_meter, &state, sentinel)?; let bytes = pred_epochs.serialize_to_vec(); let len: i64 = bytes .len() @@ -1849,15 +1840,15 @@ where } /// Getting the IBC event function exposed to the wasm VM VP environment. 
-pub fn vp_get_ibc_events( - env: &VpVmEnv, +pub fn vp_get_ibc_events( + env: &VpVmEnv, event_type_ptr: u64, event_type_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1865,12 +1856,11 @@ where .memory .read_string(event_type_ptr, event_type_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); vp_host_fns::add_gas(gas_meter, gas, sentinel)?; - let write_log = unsafe { env.ctx.write_log.get() }; - let events = vp_host_fns::get_ibc_events(gas_meter, write_log, event_type)?; + let state = env.state(); + let events = vp_host_fns::get_ibc_events(gas_meter, &state, event_type)?; let value = events.serialize_to_vec(); let len: i64 = value .len() @@ -1886,8 +1876,8 @@ where /// verifications. 
When the runtime gas meter is implemented, this function can /// be removed #[allow(clippy::too_many_arguments)] -pub fn vp_verify_tx_section_signature( - env: &VpVmEnv, +pub fn vp_verify_tx_section_signature( + env: &VpVmEnv, hash_list_ptr: u64, hash_list_len: u64, public_keys_map_ptr: u64, @@ -1900,8 +1890,8 @@ pub fn vp_verify_tx_section_signature( ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1910,8 +1900,7 @@ where .read_bytes(hash_list_ptr, hash_list_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); vp_host_fns::add_gas(gas_meter, gas, sentinel)?; let hashes = <[Hash; 1]>::try_from_slice(&hash_list) .map_err(vp_host_fns::RuntimeError::EncodingError)?; @@ -1922,7 +1911,7 @@ where .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; vp_host_fns::add_gas(gas_meter, gas, sentinel)?; let public_keys_map = - namada_core::types::account::AccountPublicKeysMap::try_from_slice( + namada_core::account::AccountPublicKeysMap::try_from_slice( &public_keys_map, ) .map_err(vp_host_fns::RuntimeError::EncodingError)?; @@ -1951,16 +1940,16 @@ where &Some(signer), threshold, max_signatures, - || gas_meter.consume(gas::VERIFY_TX_SIG_GAS), + || gas_meter.borrow_mut().consume(gas::VERIFY_TX_SIG_GAS), ) { Ok(_) => Ok(HostEnvResult::Success.to_i64()), Err(err) => match err { namada_tx::VerifySigError::Gas(inner) => { - sentinel.set_out_of_gas(); + sentinel.borrow_mut().set_out_of_gas(); Err(vp_host_fns::RuntimeError::OutOfGas(inner)) } namada_tx::VerifySigError::InvalidSectionSignature(_) => { - sentinel.set_invalid_signature(); + 
sentinel.borrow_mut().set_invalid_signature(); Ok(HostEnvResult::Fail.to_i64()) } _ => Ok(HostEnvResult::Fail.to_i64()), @@ -1971,15 +1960,15 @@ where /// Log a string from exposed to the wasm VM Tx environment. The message will be /// printed at the [`tracing::Level::INFO`]. This function is for development /// only. -pub fn tx_log_string( - env: &TxVmEnv, +pub fn tx_log_string( + env: &TxVmEnv, str_ptr: u64, str_len: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { let (str, _gas) = env @@ -1993,28 +1982,27 @@ where /// Execute IBC tx. // Temporarily the IBC tx execution is implemented via a host function to // workaround wasm issue. -pub fn tx_ibc_execute( - env: &TxVmEnv, +pub fn tx_ibc_execute( + env: &TxVmEnv, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { - use std::cell::RefCell; use std::rc::Rc; use namada_ibc::{IbcActions, TransferModule}; let tx_data = unsafe { env.ctx.tx.get().data() }.ok_or_else(|| { let sentinel = unsafe { env.ctx.sentinel.get() }; - sentinel.set_invalid_commitment(); + sentinel.borrow_mut().set_invalid_commitment(); TxRuntimeError::MissingTxData })?; - let ctx = Rc::new(RefCell::new(env.ctx.clone())); - let mut actions = IbcActions::new(ctx.clone()); - let module = TransferModule::new(ctx); + let state = Rc::new(RefCell::new(env.state())); + let mut actions = IbcActions::new(state.clone()); + let module = TransferModule::new(state); actions.add_transfer_module(module.module_id(), module); actions.execute(&tx_data)?; @@ -2022,28 +2010,28 @@ where } /// Validate a VP WASM code hash in a tx environment. 
-fn tx_validate_vp_code_hash( - env: &TxVmEnv, +fn tx_validate_vp_code_hash( + env: &TxVmEnv, code_hash: &[u8], code_tag: &Option, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { let code_hash = Hash::try_from(code_hash) .map_err(|e| TxRuntimeError::InvalidVpCodeHash(e.to_string()))?; + let state = env.state(); // First check that code hash corresponds to the code tag if it is present if let Some(tag) = code_tag { - let storage = unsafe { env.ctx.storage.get() }; let hash_key = Key::wasm_hash(tag); - let (result, gas) = storage - .read(&hash_key) + let (result, gas) = state + .db_read(&hash_key) .map_err(TxRuntimeError::StateError)?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; if let Some(tag_hash) = result { let tag_hash = Hash::try_from(&tag_hash[..]).map_err(|e| { TxRuntimeError::InvalidVpCodeHash(e.to_string()) @@ -2063,7 +2051,7 @@ where } // Then check that VP code hash is in the allowlist. - if !crate::parameters::is_vp_allowed(&env.ctx, &code_hash) + if !crate::parameters::is_vp_allowed(&env.ctx.state(), &code_hash) .map_err(TxRuntimeError::StorageError)? 
{ return Err(TxRuntimeError::DisallowedVp); @@ -2071,15 +2059,13 @@ where // Then check that the corresponding VP code does indeed exist let code_key = Key::wasm_code(&code_hash); - let write_log = unsafe { env.ctx.write_log.get() }; - let (result, gas) = write_log.read(&code_key); - tx_charge_gas(env, gas)?; + let (result, gas) = state.write_log().read(&code_key); + tx_charge_gas::(env, gas)?; if result.is_none() { - let storage = unsafe { env.ctx.storage.get() }; - let (is_present, gas) = storage - .has_key(&code_key) + let (is_present, gas) = state + .db_has_key(&code_key) .map_err(TxRuntimeError::StateError)?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; if !is_present { return Err(TxRuntimeError::InvalidVpCodeHash( "The corresponding VP code doesn't exist".to_string(), @@ -2090,21 +2076,21 @@ where } /// Set the sentinel for an invalid tx section commitment -pub fn tx_set_commitment_sentinel(env: &TxVmEnv) +pub fn tx_set_commitment_sentinel(env: &TxVmEnv) where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, CA: WasmCacheAccess, { let sentinel = unsafe { env.ctx.sentinel.get() }; - sentinel.set_invalid_commitment(); + sentinel.borrow_mut().set_invalid_commitment(); } /// Verify a transaction signature #[allow(clippy::too_many_arguments)] -pub fn tx_verify_tx_section_signature( - env: &TxVmEnv, +pub fn tx_verify_tx_section_signature( + env: &TxVmEnv, hash_list_ptr: u64, hash_list_len: u64, public_keys_map_ptr: u64, @@ -2115,8 +2101,8 @@ pub fn tx_verify_tx_section_signature( ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { let (hash_list, gas) = env @@ -2124,9 +2110,7 @@ where .read_bytes(hash_list_ptr, hash_list_len as _) .map_err(|e| 
TxRuntimeError::MemoryError(Box::new(e)))?; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let hashes = <[Hash; 1]>::try_from_slice(&hash_list) .map_err(TxRuntimeError::EncodingError)?; @@ -2134,37 +2118,38 @@ where .memory .read_bytes(public_keys_map_ptr, public_keys_map_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let public_keys_map = - namada_core::types::account::AccountPublicKeysMap::try_from_slice( + namada_core::account::AccountPublicKeysMap::try_from_slice( &public_keys_map, ) .map_err(TxRuntimeError::EncodingError)?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let (max_signatures, gas) = env .memory .read_bytes(max_signatures_ptr, max_signatures_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let max_signatures = Option::::try_from_slice(&max_signatures) .map_err(TxRuntimeError::EncodingError)?; let tx = unsafe { env.ctx.tx.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); match tx.verify_signatures( &hashes, public_keys_map, &None, threshold, max_signatures, - || gas_meter.consume(gas::VERIFY_TX_SIG_GAS), + || gas_meter.borrow_mut().consume(gas::VERIFY_TX_SIG_GAS), ) { Ok(_) => Ok(HostEnvResult::Success.to_i64()), Err(err) => match err { namada_tx::VerifySigError::Gas(inner) => { - sentinel.set_out_of_gas(); + sentinel.borrow_mut().set_out_of_gas(); Err(TxRuntimeError::OutOfGas(inner)) } namada_tx::VerifySigError::InvalidSectionSignature(_) => { @@ -2176,15 +2161,15 @@ where } /// Appends the new note commitments to the tree in storage -pub fn tx_update_masp_note_commitment_tree( - env: &TxVmEnv, +pub fn tx_update_masp_note_commitment_tree( + env: &TxVmEnv, transaction_ptr: u64, transaction_len: u64, ) -> TxResult where MEM: VmMemory, - DB: 
namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, CA: WasmCacheAccess, { let _sentinel = unsafe { env.ctx.sentinel.get() }; @@ -2194,13 +2179,12 @@ where .read_bytes(transaction_ptr, transaction_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let transaction = Transaction::try_from_slice(&serialized_transaction) .map_err(TxRuntimeError::EncodingError)?; - let mut ctx = env.ctx.clone(); match crate::token::utils::update_note_commitment_tree( - &mut ctx, + &mut env.state(), &transaction, ) { Ok(()) => Ok(HostEnvResult::Success.to_i64()), @@ -2214,8 +2198,8 @@ where } /// Evaluate a validity predicate with the given input data. -pub fn vp_eval( - env: &VpVmEnv<'static, MEM, DB, H, EVAL, CA>, +pub fn vp_eval( + env: &VpVmEnv<'static, MEM, D, H, EVAL, CA>, vp_code_hash_ptr: u64, vp_code_hash_len: u64, input_data_ptr: u64, @@ -2223,56 +2207,58 @@ pub fn vp_eval( ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, - EVAL: VpEvaluator, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { let (vp_code_hash, gas) = env .memory .read_bytes(vp_code_hash_ptr, vp_code_hash_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; - let (input_data, gas) = env - .memory - .read_bytes(input_data_ptr, input_data_len as _) - .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; - let input_data: Tx = BorshDeserialize::try_from_slice(&input_data) - .map_err(vp_host_fns::RuntimeError::EncodingError)?; + // The borrowed 
`gas_meter` and `sentinel` must be dropped before eval, + // which has to borrow these too. + let tx = { + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + + let (input_data, gas) = env + .memory + .read_bytes(input_data_ptr, input_data_len as _) + .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let tx: Tx = BorshDeserialize::try_from_slice(&input_data) + .map_err(vp_host_fns::RuntimeError::EncodingError)?; + tx + }; + + let eval_runner = unsafe { env.ctx.eval_runner.get() }; let vp_code_hash = Hash(vp_code_hash.try_into().map_err(|e| { vp_host_fns::RuntimeError::EncodingError(std::io::Error::new( std::io::ErrorKind::InvalidData, format!("Not a valid hash: {:?}", e), )) })?); - - let eval_runner = unsafe { env.ctx.eval_runner.get() }; - Ok(eval_runner - .eval(env.ctx.clone(), vp_code_hash, input_data) - .to_i64()) + Ok(eval_runner.eval(env.ctx.clone(), vp_code_hash, tx).to_i64()) } /// Get the native token's address -pub fn vp_get_native_token( - env: &VpVmEnv, +pub fn vp_get_native_token( + env: &VpVmEnv, result_ptr: u64, ) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let storage = unsafe { env.ctx.storage.get() }; + let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let state = env.state(); let native_token = - vp_host_fns::get_native_token(gas_meter, storage, sentinel)?; + vp_host_fns::get_native_token(gas_meter, &state, sentinel)?; let native_token_string = native_token.encode(); let gas = env .memory @@ -2284,15 +2270,15 @@ where /// Log a string from exposed to the wasm VM VP environment. 
The message will be /// printed at the [`tracing::Level::INFO`]. This function is for development /// only. -pub fn vp_log_string( - env: &VpVmEnv, +pub fn vp_log_string( + env: &VpVmEnv, str_ptr: u64, str_len: u64, ) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -2304,407 +2290,43 @@ where Ok(()) } -// Temp. workaround for -use namada_state::StorageRead; - -use crate::types::storage::BlockHash; -impl<'a, DB, H, CA> StorageRead for TxCtx<'a, DB, H, CA> -where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, - CA: WasmCacheAccess, -{ - // type PrefixIter<'iter> = KeyValIterator<(String, Vec)>; - type PrefixIter<'iter> = u64 where Self: 'iter; - - fn read_bytes( - &self, - key: &Key, - ) -> std::result::Result>, StorageError> { - let write_log = unsafe { self.write_log.get() }; - let (log_val, gas) = write_log.read(key); - ibc_tx_charge_gas(self, gas)?; - Ok(match log_val { - Some(write_log::StorageModification::Write { ref value }) => { - Some(value.clone()) - } - Some(&write_log::StorageModification::Delete) => None, - Some(write_log::StorageModification::InitAccount { - ref vp_code_hash, - }) => Some(vp_code_hash.to_vec()), - Some(write_log::StorageModification::Temp { ref value }) => { - Some(value.clone()) - } - None => { - // when not found in write log, try to read from the storage - let storage = unsafe { self.storage.get() }; - let (value, gas) = storage.read(key).into_storage_result()?; - ibc_tx_charge_gas(self, gas)?; - value - } - }) - } - - fn has_key(&self, key: &Key) -> Result { - // try to read from the write log first - let write_log = unsafe { self.write_log.get() }; - let (log_val, gas) = write_log.read(key); - ibc_tx_charge_gas(self, gas)?; - Ok(match log_val { - 
Some(&write_log::StorageModification::Write { .. }) => true, - Some(&write_log::StorageModification::Delete) => false, - Some(&write_log::StorageModification::InitAccount { .. }) => true, - Some(&write_log::StorageModification::Temp { .. }) => true, - None => { - // when not found in write log, try to check the storage - let storage = unsafe { self.storage.get() }; - let (present, gas) = - storage.has_key(key).into_storage_result()?; - ibc_tx_charge_gas(self, gas)?; - present - } - }) - } - - fn iter_prefix<'iter>( - &'iter self, - prefix: &Key, - ) -> Result, StorageError> { - let write_log = unsafe { self.write_log.get() }; - let storage = unsafe { self.storage.get() }; - let (iter, gas) = - namada_state::iter_prefix_post(write_log, storage, prefix); - ibc_tx_charge_gas(self, gas)?; - - let iterators = unsafe { self.iterators.get() }; - Ok(iterators.insert(iter).id()) - } - - fn iter_next<'iter>( - &'iter self, - iter_id: &mut Self::PrefixIter<'iter>, - ) -> Result)>, StorageError> { - let write_log = unsafe { self.write_log.get() }; - let iterators = unsafe { self.iterators.get() }; - let iter_id = PrefixIteratorId::new(*iter_id); - while let Some((key, val, iter_gas)) = iterators.next(iter_id) { - let (log_val, log_gas) = - write_log.read(&Key::parse(key.clone()).into_storage_result()?); - ibc_tx_charge_gas(self, iter_gas + log_gas)?; - match log_val { - Some(write_log::StorageModification::Write { ref value }) => { - return Ok(Some((key, value.clone()))); - } - Some(&write_log::StorageModification::Delete) => { - // check the next because the key has already deleted - continue; - } - Some(&write_log::StorageModification::InitAccount { - .. 
- }) => { - // a VP of a new account doesn't need to be iterated - continue; - } - Some(write_log::StorageModification::Temp { ref value }) => { - return Ok(Some((key, value.clone()))); - } - None => { - return Ok(Some((key, val))); - } - } - } - Ok(None) - } - - fn get_chain_id(&self) -> Result { - let storage = unsafe { self.storage.get() }; - let (chain_id, gas) = storage.get_chain_id(); - ibc_tx_charge_gas(self, gas)?; - Ok(chain_id) - } - - fn get_block_height(&self) -> Result { - let storage = unsafe { self.storage.get() }; - let (height, gas) = storage.get_block_height(); - ibc_tx_charge_gas(self, gas)?; - Ok(height) - } - - fn get_block_header( - &self, - height: BlockHeight, - ) -> Result, StorageError> { - let storage = unsafe { self.storage.get() }; - let (header, gas) = storage - .get_block_header(Some(height)) - .into_storage_result()?; - ibc_tx_charge_gas(self, gas)?; - Ok(header) - } - - fn get_block_hash(&self) -> Result { - let storage = unsafe { self.storage.get() }; - let (hash, gas) = storage.get_block_hash(); - ibc_tx_charge_gas(self, gas)?; - Ok(hash) - } - - fn get_block_epoch(&self) -> Result { - let storage = unsafe { self.storage.get() }; - let (epoch, gas) = storage.get_current_epoch(); - ibc_tx_charge_gas(self, gas)?; - Ok(epoch) - } - - fn get_tx_index(&self) -> Result { - let tx_index = unsafe { self.tx_index.get() }; - ibc_tx_charge_gas( - self, - crate::vm::host_env::gas::STORAGE_ACCESS_GAS_PER_BYTE, - )?; - Ok(TxIndex(tx_index.0)) - } - - fn get_native_token(&self) -> Result { - let storage = unsafe { self.storage.get() }; - let native_token = storage.native_token.clone(); - ibc_tx_charge_gas( - self, - crate::vm::host_env::gas::STORAGE_ACCESS_GAS_PER_BYTE, - )?; - Ok(native_token) - } - - fn get_pred_epochs(&self) -> namada_state::StorageResult { - let storage = unsafe { self.storage.get() }; - ibc_tx_charge_gas( - self, - crate::vm::host_env::gas::STORAGE_ACCESS_GAS_PER_BYTE, - )?; - Ok(storage.block.pred_epochs.clone()) - } -} - 
-// Temp. workaround for -use namada_state::StorageWrite; -impl<'a, DB, H, CA> StorageWrite for TxCtx<'a, DB, H, CA> -where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, - CA: WasmCacheAccess, -{ - fn write_bytes( - &mut self, - key: &Key, - data: impl AsRef<[u8]>, - ) -> Result<(), StorageError> { - let write_log = unsafe { self.write_log.get() }; - let (gas, _size_diff) = write_log - .write(key, data.as_ref().to_vec()) - .into_storage_result()?; - ibc_tx_charge_gas(self, gas) - } - - fn delete(&mut self, key: &Key) -> Result<(), StorageError> { - if key.is_validity_predicate().is_some() { - return Err(TxRuntimeError::CannotDeleteVp).into_storage_result(); - } - - let write_log = unsafe { self.write_log.get() }; - let (gas, _size_diff) = write_log.delete(key).into_storage_result()?; - ibc_tx_charge_gas(self, gas) - } -} - -// Temp. workaround for -impl<'a, DB, H, CA> namada_ibc::IbcStorageContext for TxCtx<'a, DB, H, CA> -where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, - CA: WasmCacheAccess, -{ - fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<(), StorageError> { - let write_log = unsafe { self.write_log.get() }; - let gas = write_log.emit_ibc_event(event); - ibc_tx_charge_gas(self, gas) - } - - fn get_ibc_events( - &self, - event_type: impl AsRef, - ) -> Result, StorageError> { - let write_log = unsafe { self.write_log.get() }; - Ok(write_log - .get_ibc_events() - .iter() - .filter(|event| event.event_type == event_type.as_ref()) - .cloned() - .collect()) - } - - fn transfer_token( - &mut self, - src: &Address, - dest: &Address, - token: &Address, - amount: crate::token::DenominatedAmount, - ) -> Result<(), StorageError> { - use crate::token; - - let amount = token::denom_to_amount(amount, token, self)?; - if amount != token::Amount::default() && src != dest { - let src_key = balance_key(token, src); - let dest_key = balance_key(token, dest); - let src_bal = 
self.read::(&src_key)?; - let mut src_bal = src_bal.ok_or_else(|| { - StorageError::new_const("the source has no balance") - })?; - src_bal.spend(&amount).into_storage_result()?; - let mut dest_bal = - self.read::(&dest_key)?.unwrap_or_default(); - dest_bal.receive(&amount).into_storage_result()?; - self.write(&src_key, src_bal)?; - self.write(&dest_key, dest_bal)?; - } - Ok(()) - } - - fn handle_masp_tx( - &mut self, - shielded: &masp_primitives::transaction::Transaction, - pin_key: Option<&str>, - ) -> Result<(), StorageError> { - crate::token::utils::handle_masp_tx(self, shielded, pin_key)?; - crate::token::utils::update_note_commitment_tree(self, shielded) - } - - fn mint_token( - &mut self, - target: &Address, - token: &Address, - amount: crate::token::DenominatedAmount, - ) -> Result<(), StorageError> { - use crate::token; - - let amount = token::denom_to_amount(amount, token, self)?; - let target_key = balance_key(token, target); - let mut target_bal = - self.read::(&target_key)?.unwrap_or_default(); - target_bal.receive(&amount).into_storage_result()?; - - let minted_key = minted_balance_key(token); - let mut minted_bal = - self.read::(&minted_key)?.unwrap_or_default(); - minted_bal.receive(&amount).into_storage_result()?; - - self.write(&target_key, target_bal)?; - self.write(&minted_key, minted_bal)?; - - let minter_key = minter_key(token); - self.write( - &minter_key, - Address::Internal(address::InternalAddress::Ibc), - ) - } - - fn burn_token( - &mut self, - target: &Address, - token: &Address, - amount: crate::token::DenominatedAmount, - ) -> Result<(), StorageError> { - use crate::token; - - let amount = token::denom_to_amount(amount, token, self)?; - let target_key = balance_key(token, target); - let mut target_bal = - self.read::(&target_key)?.unwrap_or_default(); - target_bal.spend(&amount).into_storage_result()?; - - // burn the minted amount - let minted_key = minted_balance_key(token); - let mut minted_bal = - 
self.read::(&minted_key)?.unwrap_or_default(); - minted_bal.spend(&amount).into_storage_result()?; - - self.write(&target_key, target_bal)?; - self.write(&minted_key, minted_bal) - } - - fn log_string(&self, message: String) { - tracing::info!("IBC host env log: {}", message); - } -} - -/// Add a gas cost incured in a transaction -// Temp helper. -fn ibc_tx_charge_gas<'a, DB, H, CA>( - ctx: &TxCtx<'a, DB, H, CA>, - used_gas: u64, -) -> Result<(), StorageError> -where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, - CA: WasmCacheAccess, -{ - let gas_meter = unsafe { ctx.gas_meter.get() }; - // if we run out of gas, we need to stop the execution - let result = gas_meter.consume(used_gas).into_storage_result(); - if let Err(err) = &result { - let sentinel = unsafe { ctx.sentinel.get() }; - sentinel.set_out_of_gas(); - tracing::info!( - "Stopping transaction execution because of gas error: {}", - err - ); - } - result -} - -// Temp. workaround for -impl<'a, DB, H, CA> namada_ibc::IbcCommonContext for TxCtx<'a, DB, H, CA> -where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, - CA: WasmCacheAccess, -{ -} - /// A helper module for testing #[cfg(feature = "testing")] pub mod testing { - use std::collections::BTreeSet; - - use namada_state::StorageHasher; - use super::*; use crate::vm::memory::testing::NativeMemory; use crate::vm::wasm::memory::WasmMemory; /// Setup a transaction environment #[allow(clippy::too_many_arguments)] - pub fn tx_env( - storage: &State, - write_log: &mut WriteLog, - iterators: &mut PrefixIterators<'static, DB>, + pub fn tx_env( + state: &mut S, + iterators: &mut PrefixIterators<'static, ::D>, verifiers: &mut BTreeSet
, - gas_meter: &mut TxGasMeter, - sentinel: &mut TxSentinel, + gas_meter: &RefCell, + sentinel: &RefCell, tx: &Tx, tx_index: &TxIndex, result_buffer: &mut Option>, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, #[cfg(feature = "wasm-runtime")] tx_wasm_cache: &mut TxCache, - ) -> TxVmEnv<'static, NativeMemory, DB, H, CA> + ) -> TxVmEnv< + 'static, + NativeMemory, + ::D, + ::H, + CA, + > where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: State, CA: WasmCacheAccess, { + let (write_log, in_mem, db) = state.split_borrow(); TxVmEnv::new( NativeMemory, - storage, write_log, + in_mem, + db, iterators, gas_meter, sentinel, @@ -2721,22 +2343,26 @@ pub mod testing { /// Setup a transaction environment #[allow(clippy::too_many_arguments)] - pub fn tx_env_with_wasm_memory( - storage: &State, - write_log: &mut WriteLog, - iterators: &mut PrefixIterators<'static, DB>, + pub fn tx_env_with_wasm_memory( + state: &mut S, + iterators: &mut PrefixIterators<'static, ::D>, verifiers: &mut BTreeSet
, - gas_meter: &mut TxGasMeter, - sentinel: &mut TxSentinel, + gas_meter: &RefCell, + sentinel: &RefCell, tx: &Tx, tx_index: &TxIndex, result_buffer: &mut Option>, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, #[cfg(feature = "wasm-runtime")] tx_wasm_cache: &mut TxCache, - ) -> TxVmEnv<'static, WasmMemory, DB, H, CA> + ) -> TxVmEnv< + 'static, + WasmMemory, + ::D, + ::H, + CA, + > where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: State, CA: WasmCacheAccess, { let store = crate::vm::wasm::compilation_cache::common::store(); @@ -2745,10 +2371,12 @@ pub mod testing { let mut wasm_memory = WasmMemory::default(); wasm_memory.inner.initialize(initial_memory); + let (write_log, in_mem, db) = state.split_borrow(); TxVmEnv::new( wasm_memory, - storage, write_log, + in_mem, + db, iterators, gas_meter, sentinel, @@ -2765,13 +2393,12 @@ pub mod testing { /// Setup a validity predicate environment #[allow(clippy::too_many_arguments)] - pub fn vp_env( + pub fn vp_env( address: &Address, - storage: &State, - write_log: &WriteLog, - iterators: &mut PrefixIterators<'static, DB>, - gas_meter: &mut VpGasMeter, - sentinel: &mut VpSentinel, + state: &S, + iterators: &mut PrefixIterators<'static, ::D>, + gas_meter: &RefCell, + sentinel: &RefCell, tx: &Tx, tx_index: &TxIndex, verifiers: &BTreeSet
, @@ -2779,18 +2406,25 @@ pub mod testing { keys_changed: &BTreeSet, eval_runner: &EVAL, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, - ) -> VpVmEnv<'static, NativeMemory, DB, H, EVAL, CA> + ) -> VpVmEnv< + 'static, + NativeMemory, + ::D, + ::H, + EVAL, + CA, + > where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, EVAL: VpEvaluator, CA: WasmCacheAccess, { VpVmEnv::new( NativeMemory, address, - storage, - write_log, + state.write_log(), + state.in_mem(), + state.db(), gas_meter, sentinel, tx, diff --git a/crates/namada/src/vm/types.rs b/crates/namada/src/vm/types.rs index 54cc170371..a724fabb90 100644 --- a/crates/namada/src/vm/types.rs +++ b/crates/namada/src/vm/types.rs @@ -2,19 +2,17 @@ //! memory. //! //! These are either: -//! 1. Module call types -//! The module call inputs are passed host-to-guest. +//! 1. Module call types The module call inputs are passed host-to-guest. //! -//! 2. Execution environment types -//! The environment inputs are passed guest-to-host and outputs back from -//! host-to-guest. +//! 2. Execution environment types The environment inputs are passed +//! guest-to-host and outputs back from host-to-guest. 
use std::collections::BTreeSet; use namada_tx::Tx; -use crate::types::address::Address; -use crate::types::storage; +use crate::address::Address; +use crate::storage; /// Input for validity predicate wasm module call pub struct VpInput<'a> { diff --git a/crates/namada/src/vm/wasm/compilation_cache/common.rs b/crates/namada/src/vm/wasm/compilation_cache/common.rs index ce3758678e..cb9291b14a 100644 --- a/crates/namada/src/vm/wasm/compilation_cache/common.rs +++ b/crates/namada/src/vm/wasm/compilation_cache/common.rs @@ -18,8 +18,8 @@ use clru::{CLruCache, CLruCacheConfig, WeightScale}; use wasmer::{Module, Store}; use wasmer_cache::{FileSystemCache, Hash as CacheHash}; -use crate::core::types::hash::Hash; -use crate::types::control_flow::time::{ExponentialBackoff, SleepStrategy}; +use crate::control_flow::time::{ExponentialBackoff, SleepStrategy}; +use crate::core::hash::Hash; use crate::vm::wasm::run::untrusted_wasm_store; use crate::vm::wasm::{self, memory}; use crate::vm::{WasmCacheAccess, WasmCacheRoAccess}; diff --git a/crates/namada/src/vm/wasm/host_env.rs b/crates/namada/src/vm/wasm/host_env.rs index caa614a185..679d8379b4 100644 --- a/crates/namada/src/vm/wasm/host_env.rs +++ b/crates/namada/src/vm/wasm/host_env.rs @@ -3,7 +3,7 @@ //! Here, we expose the host functions into wasm's //! imports, so they can be called from inside the wasm. 
-use namada_core::types::hash::StorageHasher; +use namada_state::{DBIter, StorageHasher, DB}; use wasmer::{ Function, HostEnvInitError, ImportObject, Instance, Memory, Store, WasmerEnv, @@ -13,9 +13,9 @@ use crate::vm::host_env::{TxVmEnv, VpEvaluator, VpVmEnv}; use crate::vm::wasm::memory::WasmMemory; use crate::vm::{host_env, WasmCacheAccess}; -impl WasmerEnv for TxVmEnv<'_, WasmMemory, DB, H, CA> +impl WasmerEnv for TxVmEnv<'_, WasmMemory, D, H, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { @@ -27,9 +27,9 @@ where } } -impl WasmerEnv for VpVmEnv<'_, WasmMemory, DB, H, EVAL, CA> +impl WasmerEnv for VpVmEnv<'_, WasmMemory, D, H, EVAL, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, @@ -45,13 +45,13 @@ where /// Prepare imports (memory and host functions) exposed to the vm guest running /// transaction code #[allow(clippy::too_many_arguments)] -pub fn tx_imports( +pub fn tx_imports( wasm_store: &Store, initial_memory: Memory, - env: TxVmEnv<'static, WasmMemory, DB, H, CA>, + env: TxVmEnv<'static, WasmMemory, D, H, CA>, ) -> ImportObject where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { @@ -87,22 +87,22 @@ where "namada_tx_ibc_execute" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_ibc_execute), "namada_tx_set_commitment_sentinel" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_set_commitment_sentinel), "namada_tx_verify_tx_section_signature" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_verify_tx_section_signature), - "namada_tx_update_masp_note_commitment_tree" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_update_masp_note_commitment_tree) + 
"namada_tx_update_masp_note_commitment_tree" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_update_masp_note_commitment_tree), }, } } /// Prepare imports (memory and host functions) exposed to the vm guest running /// validity predicate code -pub fn vp_imports( +pub fn vp_imports( wasm_store: &Store, initial_memory: Memory, - env: VpVmEnv<'static, WasmMemory, DB, H, EVAL, CA>, + env: VpVmEnv<'static, WasmMemory, D, H, EVAL, CA>, ) -> ImportObject where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, - EVAL: VpEvaluator, + EVAL: VpEvaluator, CA: WasmCacheAccess, { wasmer::imports! { diff --git a/crates/namada/src/vm/wasm/memory.rs b/crates/namada/src/vm/wasm/memory.rs index 35923e512b..671af064d3 100644 --- a/crates/namada/src/vm/wasm/memory.rs +++ b/crates/namada/src/vm/wasm/memory.rs @@ -453,9 +453,7 @@ impl Tunables for Limit { #[cfg(test)] pub mod tests { - use wasmer::{ - wat2wasm, BaseTunables, Cranelift, Instance, Module, Store, Target, - }; + use wasmer::{wat2wasm, Cranelift, Instance, Module, Store}; use super::*; diff --git a/crates/namada/src/vm/wasm/run.rs b/crates/namada/src/vm/wasm/run.rs index 8d84185135..817e5c6432 100644 --- a/crates/namada/src/vm/wasm/run.rs +++ b/crates/namada/src/vm/wasm/run.rs @@ -1,13 +1,15 @@ //! 
Wasm runners +use std::cell::RefCell; use std::collections::BTreeSet; +use std::fmt::Debug; use std::marker::PhantomData; use borsh::BorshDeserialize; -use namada_core::types::validity_predicate::VpSentinel; +use namada_core::validity_predicate::VpSentinel; use namada_gas::{GasMetering, TxGasMeter, WASM_MEMORY_PAGE_GAS}; use namada_state::write_log::StorageModification; -use namada_state::{State, StorageHasher}; +use namada_state::{DBIter, State, StateRead, StorageHasher, DB}; use namada_tx::data::TxSentinel; use namada_tx::{Commitment, Section, Tx}; use parity_wasm::elements; @@ -16,12 +18,11 @@ use wasmer::{BaseTunables, Module, Store}; use super::memory::{Limit, WasmMemory}; use super::TxCache; +use crate::address::Address; +use crate::hash::{Error as TxHashError, Hash}; +use crate::internal::HostEnvResult; use crate::ledger::gas::VpGasMeter; -use crate::state::write_log::WriteLog; -use crate::types::address::Address; -use crate::types::hash::{Error as TxHashError, Hash}; -use crate::types::internal::HostEnvResult; -use crate::types::storage::{Key, TxIndex}; +use crate::storage::{Key, TxIndex}; use crate::vm::host_env::{TxVmEnv, VpCtx, VpEvaluator, VpVmEnv}; use crate::vm::prefix_iter::PrefixIterators; use crate::vm::types::VpInput; @@ -91,18 +92,16 @@ pub type Result = std::result::Result; /// Execute a transaction code. Returns the set verifiers addresses requested by /// the transaction. 
#[allow(clippy::too_many_arguments)] -pub fn tx( - storage: &State, - write_log: &mut WriteLog, - gas_meter: &mut TxGasMeter, +pub fn tx( + state: &mut S, + gas_meter: &RefCell, tx_index: &TxIndex, tx: &Tx, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead + State, CA: 'static + WasmCacheAccess, { let tx_code = tx @@ -115,16 +114,12 @@ where if let Some(tag) = &tx_code.tag { // Get the WASM code hash corresponding to the tag from storage let hash_key = Key::wasm_hash(tag); - let hash_value = match storage - .read(&hash_key) - .map_err(|e| { - Error::LoadWasmCode(format!( - "Read wasm code hash failed from storage: key {}, error {}", - hash_key, e - )) - })? - .0 - { + let hash_value = match state.read_bytes(&hash_key).map_err(|e| { + Error::LoadWasmCode(format!( + "Read wasm code hash failed from storage: key {}, error {}", + hash_key, e + )) + })? 
{ Some(v) => Hash::try_from_slice(&v) .map_err(|e| Error::ConversionError(e.to_string()))?, None => { @@ -145,26 +140,24 @@ where } } - let (module, store) = fetch_or_compile( - tx_wasm_cache, - &tx_code.code, - write_log, - storage, - gas_meter, - )?; + let (module, store) = + fetch_or_compile(tx_wasm_cache, &tx_code.code, state, gas_meter)?; - let mut iterators: PrefixIterators<'_, DB> = PrefixIterators::default(); + let mut iterators: PrefixIterators<'_, ::D> = + PrefixIterators::default(); let mut verifiers = BTreeSet::new(); let mut result_buffer: Option> = None; - let mut sentinel = TxSentinel::default(); + let sentinel = RefCell::new(TxSentinel::default()); + let (write_log, in_mem, db) = state.split_borrow(); let env = TxVmEnv::new( WasmMemory::default(), - storage, write_log, + in_mem, + db, &mut iterators, gas_meter, - &mut sentinel, + &sentinel, tx, tx_index, &mut verifiers, @@ -203,7 +196,7 @@ where })?; apply_tx.call(tx_data_ptr, tx_data_len).map_err(|err| { tracing::debug!("Tx WASM failed with {}", err); - match sentinel { + match *sentinel.borrow() { TxSentinel::None => Error::RuntimeError(err), TxSentinel::OutOfGas => Error::GasError(err.to_string()), TxSentinel::InvalidCommitment => { @@ -219,48 +212,47 @@ where /// predicate accepted storage modifications performed by the transaction /// that triggered the execution. #[allow(clippy::too_many_arguments)] -pub fn vp( +pub fn vp( vp_code_hash: Hash, tx: &Tx, tx_index: &TxIndex, address: &Address, - storage: &State, - write_log: &WriteLog, - gas_meter: &mut VpGasMeter, + state: &S, + gas_meter: &RefCell, keys_changed: &BTreeSet, verifiers: &BTreeSet
, mut vp_wasm_cache: VpCache, ) -> Result where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { // Compile the wasm module let (module, store) = fetch_or_compile( &mut vp_wasm_cache, &Commitment::Hash(vp_code_hash), - write_log, - storage, + state, gas_meter, )?; - let mut iterators: PrefixIterators<'_, DB> = PrefixIterators::default(); + let mut iterators: PrefixIterators<'_, ::D> = + PrefixIterators::default(); let mut result_buffer: Option> = None; - let eval_runner = VpEvalWasm { - db: PhantomData, - hasher: PhantomData, - cache_access: PhantomData, - }; - - let mut sentinel = VpSentinel::default(); + let eval_runner = + VpEvalWasm::<::D, ::H, CA> { + db: PhantomData, + hasher: PhantomData, + cache_access: PhantomData, + }; + let sentinel = RefCell::new(VpSentinel::default()); let env = VpVmEnv::new( WasmMemory::default(), address, - storage, - write_log, + state.write_log(), + state.in_mem(), + state.db(), gas_meter, - &mut sentinel, + &sentinel, tx, tx_index, &mut iterators, @@ -283,10 +275,9 @@ where address, keys_changed, verifiers, - gas_meter, ) { Ok(accept) => { - if sentinel.is_invalid_signature() { + if sentinel.borrow().is_invalid_signature() { if accept { // This is unexpected, if the signature is invalid the vp // should have rejected the tx. Something must be wrong with @@ -305,7 +296,7 @@ where } } Err(err) => { - if sentinel.is_out_of_gas() { + if sentinel.borrow().is_out_of_gas() { Err(Error::GasError(err.to_string())) } else { Err(err) @@ -323,7 +314,6 @@ fn run_vp( address: &Address, keys_changed: &BTreeSet, verifiers: &BTreeSet
, - _gas_meter: &mut VpGasMeter, ) -> Result { let input: VpInput = VpInput { addr: address, @@ -381,34 +371,34 @@ fn run_vp( /// Validity predicate wasm evaluator for `eval` host function calls. #[derive(Default, Debug)] -pub struct VpEvalWasm +pub struct VpEvalWasm where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { /// Phantom type for DB - pub db: PhantomData<*const DB>, - /// Phantom type for DB Hasher + pub db: PhantomData<*const D>, + /// Phantom type for hasher pub hasher: PhantomData<*const H>, /// Phantom type for WASM compilation cache access pub cache_access: PhantomData<*const CA>, } -impl VpEvaluator for VpEvalWasm +impl VpEvaluator for VpEvalWasm where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, CA: WasmCacheAccess, { type CA = CA; - type Db = DB; + type Db = D; type Eval = Self; type H = H; fn eval( &self, - ctx: VpCtx<'static, DB, H, Self, CA>, + ctx: VpCtx<'static, D, H, Self, CA>, vp_code_hash: Hash, input_data: Tx, ) -> HostEnvResult { @@ -422,16 +412,16 @@ where } } -impl VpEvalWasm +impl VpEvalWasm where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, CA: WasmCacheAccess, { /// Evaluate the given VP. 
pub fn eval_native_result( &self, - ctx: VpCtx<'static, DB, H, Self, CA>, + ctx: VpCtx<'static, D, H, Self, CA>, vp_code_hash: Hash, input_data: Tx, ) -> Result { @@ -439,26 +429,23 @@ where let keys_changed = unsafe { ctx.keys_changed.get() }; let verifiers = unsafe { ctx.verifiers.get() }; let vp_wasm_cache = unsafe { ctx.vp_wasm_cache.get() }; - let write_log = unsafe { ctx.write_log.get() }; - let storage = unsafe { ctx.storage.get() }; let gas_meter = unsafe { ctx.gas_meter.get() }; - let env = VpVmEnv { - memory: WasmMemory::default(), - ctx, - }; // Compile the wasm module let (module, store) = fetch_or_compile( vp_wasm_cache, &Commitment::Hash(vp_code_hash), - write_log, - storage, + &ctx.state(), gas_meter, )?; let initial_memory = memory::prepare_vp_memory(&store).map_err(Error::MemoryError)?; + let env = VpVmEnv { + memory: WasmMemory::default(), + ctx, + }; let imports = vp_imports(&store, initial_memory, env); run_vp( @@ -469,7 +456,6 @@ where address, keys_changed, verifiers, - gas_meter, ) } } @@ -504,16 +490,14 @@ pub fn prepare_wasm_code>(code: T) -> Result> { // Fetch or compile a WASM code from the cache or storage. Account for the // loading and code compilation gas costs. 
-fn fetch_or_compile( +fn fetch_or_compile( wasm_cache: &mut Cache, code_or_hash: &Commitment, - write_log: &WriteLog, - storage: &State, - gas_meter: &mut dyn GasMetering, + state: &S, + gas_meter: &RefCell, ) -> Result<(Module, Store)> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CN: 'static + CacheName, CA: 'static + WasmCacheAccess, { @@ -523,14 +507,14 @@ where Some((module, store)) => { // Gas accounting even if the compiled module is in cache let key = Key::wasm_code_len(code_hash); - let tx_len = match write_log.read(&key).0 { + let tx_len = match state.write_log().read(&key).0 { Some(StorageModification::Write { value }) => { u64::try_from_slice(value).map_err(|e| { Error::ConversionError(e.to_string()) }) } - _ => match storage - .read(&key) + _ => match state + .db_read(&key) .map_err(|e| { Error::LoadWasmCode(format!( "Read wasm code length failed from \ @@ -554,12 +538,12 @@ where } None => { let key = Key::wasm_code(code_hash); - let code = match write_log.read(&key).0 { + let code = match state.write_log().read(&key).0 { Some(StorageModification::Write { value }) => { value.clone() } - _ => match storage - .read(&key) + _ => match state + .db_read(&key) .map_err(|e| { Error::LoadWasmCode(format!( "Read wasm code failed from storage: key \ @@ -589,9 +573,11 @@ where }; gas_meter + .borrow_mut() .add_wasm_load_from_storage_gas(tx_len) .map_err(|e| Error::GasError(e.to_string()))?; gas_meter + .borrow_mut() .add_compiling_gas(tx_len) .map_err(|e| Error::GasError(e.to_string()))?; Ok((module, store)) @@ -599,11 +585,13 @@ where Commitment::Id(code) => { let tx_len = code.len() as u64; gas_meter + .borrow_mut() .add_wasm_validation_gas(tx_len) .map_err(|e| Error::GasError(e.to_string()))?; validate_untrusted_wasm(code).map_err(Error::ValidationError)?; gas_meter + .borrow_mut() .add_compiling_gas(tx_len) .map_err(|e| Error::GasError(e.to_string()))?; match 
wasm_cache.compile_or_fetch(code)? { @@ -634,6 +622,7 @@ mod tests { use borsh_ext::BorshSerializeExt; use itertools::Either; + use namada_state::StorageWrite; use namada_test_utils::TestWasms; use namada_tx::data::TxType; use namada_tx::{Code, Data}; @@ -641,9 +630,8 @@ mod tests { use wasmer_vm::TrapCode; use super::*; - use crate::state::testing::TestStorage; + use crate::state::testing::TestState; use crate::tx::data::eval_vp::EvalVp; - use crate::types::hash::Hash; use crate::vm::host_env::TxRuntimeError; use crate::vm::wasm; @@ -698,7 +686,8 @@ mod tests { let downcasted_tx_rt_err: &TxRuntimeError = source_err .downcast_ref() .unwrap_or_else(|| panic!("{assert_msg}: {source_err}")); - let TxRuntimeError::MemoryError(tx_mem_err) = downcasted_tx_rt_err else { + let TxRuntimeError::MemoryError(tx_mem_err) = downcasted_tx_rt_err + else { panic!("{assert_msg}: {downcasted_tx_rt_err}"); }; tx_mem_err @@ -758,9 +747,9 @@ mod tests { /// wasm execution, the execution is aborted. #[test] fn test_tx_memory_limiter_in_guest() { - let storage = TestStorage::default(); - let mut write_log = WriteLog::default(); - let mut gas_meter = TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()); + let mut state = TestState::default(); + let gas_meter = + RefCell::new(TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into())); let tx_index = TxIndex::default(); // This code will allocate memory of the given size @@ -770,8 +759,8 @@ mod tests { let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); let code_len = (tx_code.len() as u64).serialize_to_vec(); - write_log.write(&key, tx_code.clone()).unwrap(); - write_log.write(&len_key, code_len).unwrap(); + state.write_log_mut().write(&key, tx_code.clone()).unwrap(); + state.write_log_mut().write(&len_key, code_len).unwrap(); // Assuming 200 pages, 12.8 MiB limit assert_eq!(memory::TX_MEMORY_MAX_PAGES, 200); @@ -787,9 +776,8 @@ mod tests { outer_tx.set_code(Code::new(tx_code.clone(), None)); 
outer_tx.set_data(Data::new(tx_data)); let result = tx( - &storage, - &mut write_log, - &mut gas_meter, + &mut state, + &gas_meter, &tx_index, &outer_tx, &mut vp_cache, @@ -804,9 +792,8 @@ mod tests { outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); let error = tx( - &storage, - &mut write_log, - &mut gas_meter, + &mut state, + &gas_meter, &tx_index, &outer_tx, &mut vp_cache, @@ -822,12 +809,11 @@ mod tests { /// fails and hence returns `false`. #[test] fn test_vp_memory_limiter_in_guest_calling_eval() { - let mut storage = TestStorage::default(); - let addr = storage.address_gen.generate_address("rng seed"); - let write_log = WriteLog::default(); - let mut gas_meter = VpGasMeter::new_from_tx_meter( + let mut state = TestState::default(); + let addr = state.in_mem_mut().address_gen.generate_address("rng seed"); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let keys_changed = BTreeSet::new(); let verifiers = BTreeSet::new(); let tx_index = TxIndex::default(); @@ -839,8 +825,8 @@ mod tests { let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); let code_len = (vp_eval.len() as u64).serialize_to_vec(); - storage.write(&key, vp_eval).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_eval).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); // This code will allocate memory of the given size let vp_memory_limit = TestWasms::VpMemoryLimit.read_bytes(); // store the wasm code @@ -848,8 +834,8 @@ mod tests { let key = Key::wasm_code(&limit_code_hash); let len_key = Key::wasm_code_len(&limit_code_hash); let code_len = (vp_memory_limit.len() as u64).serialize_to_vec(); - storage.write(&key, vp_memory_limit).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_memory_limit).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); // Assuming 200 pages, 
12.8 MiB limit assert_eq!(memory::VP_MEMORY_MAX_PAGES, 200); @@ -858,7 +844,7 @@ mod tests { // shouldn't fail let input = 2_usize.pow(23).serialize_to_vec(); - let mut tx = Tx::new(storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(vec![], None).add_serialized_data(input); let eval_vp = EvalVp { @@ -866,7 +852,7 @@ mod tests { input: tx, }; - let mut outer_tx = Tx::new(storage.chain_id.clone(), None); + let mut outer_tx = Tx::new(state.in_mem().chain_id.clone(), None); outer_tx.add_code(vec![], None).add_data(eval_vp); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); @@ -877,9 +863,8 @@ mod tests { &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache.clone(), @@ -890,7 +875,7 @@ mod tests { // Allocating `2^24` (16 MiB) should be above the memory limit and // should fail let input = 2_usize.pow(24).serialize_to_vec(); - let mut tx = Tx::new(storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(vec![], None).add_data(input); let eval_vp = EvalVp { @@ -898,7 +883,7 @@ mod tests { input: tx, }; - let mut outer_tx = Tx::new(storage.chain_id.clone(), None); + let mut outer_tx = Tx::new(state.in_mem().chain_id.clone(), None); outer_tx.add_code(vec![], None).add_data(eval_vp); // When the `eval`ed VP runs out of memory, its result should be @@ -909,9 +894,8 @@ mod tests { &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache, @@ -925,12 +909,11 @@ mod tests { /// inside the wasm execution, the execution is aborted. 
#[test] fn test_vp_memory_limiter_in_guest() { - let mut storage = TestStorage::default(); - let addr = storage.address_gen.generate_address("rng seed"); - let write_log = WriteLog::default(); - let mut gas_meter = VpGasMeter::new_from_tx_meter( + let mut state = TestState::default(); + let addr = state.in_mem_mut().address_gen.generate_address("rng seed"); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let keys_changed = BTreeSet::new(); let verifiers = BTreeSet::new(); let tx_index = TxIndex::default(); @@ -942,8 +925,8 @@ mod tests { let code_len = (vp_code.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - storage.write(&key, vp_code).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_code).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); // Assuming 200 pages, 12.8 MiB limit assert_eq!(memory::VP_MEMORY_MAX_PAGES, 200); @@ -952,7 +935,7 @@ mod tests { // shouldn't fail let tx_data = 2_usize.pow(23).serialize_to_vec(); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); outer_tx.set_code(Code::new(vec![], None)); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); @@ -961,9 +944,8 @@ mod tests { &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache.clone(), @@ -974,16 +956,15 @@ mod tests { // should fail let tx_data = 2_usize.pow(24).serialize_to_vec(); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); let error = vp( code_hash, &outer_tx, &tx_index, &addr, - &storage, - 
&write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache, @@ -997,9 +978,9 @@ mod tests { /// host input, the execution fails. #[test] fn test_tx_memory_limiter_in_host_input() { - let storage = TestStorage::default(); - let mut write_log = WriteLog::default(); - let mut gas_meter = TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()); + let mut state = TestState::default(); + let gas_meter = + RefCell::new(TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into())); let tx_index = TxIndex::default(); let tx_no_op = TestWasms::TxNoOp.read_bytes(); @@ -1008,8 +989,8 @@ mod tests { let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); let code_len = (tx_no_op.len() as u64).serialize_to_vec(); - write_log.write(&key, tx_no_op.clone()).unwrap(); - write_log.write(&len_key, code_len).unwrap(); + state.write_log_mut().write(&key, tx_no_op.clone()).unwrap(); + state.write_log_mut().write(&len_key, code_len).unwrap(); // Assuming 200 pages, 12.8 MiB limit assert_eq!(memory::TX_MEMORY_MAX_PAGES, 200); @@ -1026,9 +1007,8 @@ mod tests { outer_tx.set_code(Code::new(tx_no_op, None)); outer_tx.set_data(Data::new(tx_data)); let result = tx( - &storage, - &mut write_log, - &mut gas_meter, + &mut state, + &gas_meter, &tx_index, &outer_tx, &mut vp_cache, @@ -1057,12 +1037,11 @@ mod tests { /// in the host input, the execution fails. 
#[test] fn test_vp_memory_limiter_in_host_input() { - let mut storage = TestStorage::default(); - let addr = storage.address_gen.generate_address("rng seed"); - let write_log = WriteLog::default(); - let mut gas_meter = VpGasMeter::new_from_tx_meter( + let mut state = TestState::default(); + let addr = state.in_mem_mut().address_gen.generate_address("rng seed"); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let keys_changed = BTreeSet::new(); let verifiers = BTreeSet::new(); let tx_index = TxIndex::default(); @@ -1073,8 +1052,8 @@ mod tests { let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); let code_len = (vp_code.len() as u64).serialize_to_vec(); - storage.write(&key, vp_code).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_code).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); // Assuming 200 pages, 12.8 MiB limit assert_eq!(memory::VP_MEMORY_MAX_PAGES, 200); @@ -1084,7 +1063,7 @@ mod tests { let len = 2_usize.pow(24); let tx_data: Vec = vec![6_u8; len]; let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); outer_tx.set_code(Code::new(vec![], None)); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); @@ -1093,9 +1072,8 @@ mod tests { &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache, @@ -1126,9 +1104,9 @@ mod tests { /// execution is aborted. 
#[test] fn test_tx_memory_limiter_in_host_env() { - let mut storage = TestStorage::default(); - let mut write_log = WriteLog::default(); - let mut gas_meter = TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()); + let mut state = TestState::default(); + let gas_meter = + RefCell::new(TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into())); let tx_index = TxIndex::default(); let tx_read_key = TestWasms::TxReadStorageKey.read_bytes(); @@ -1137,8 +1115,11 @@ mod tests { let code_len = (tx_read_key.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - write_log.write(&key, tx_read_key.clone()).unwrap(); - write_log.write(&len_key, code_len).unwrap(); + state + .write_log_mut() + .write(&key, tx_read_key.clone()) + .unwrap(); + state.write_log_mut().write(&len_key, code_len).unwrap(); // Allocating `2^24` (16 MiB) for a value in storage that the tx // attempts to read should be above the memory limit and should @@ -1150,7 +1131,7 @@ mod tests { // Write the value that should be read by the tx into the storage. When // writing directly to storage, the value has to be encoded with // Borsh. - storage.write(&key, value.serialize_to_vec()).unwrap(); + state.write_bytes(&key, value.serialize_to_vec()).unwrap(); let tx_data = key.serialize_to_vec(); let (mut vp_cache, _) = wasm::compilation_cache::common::testing::cache(); @@ -1160,9 +1141,8 @@ mod tests { outer_tx.set_code(Code::new(tx_read_key, None)); outer_tx.set_data(Data::new(tx_data)); let error = tx( - &storage, - &mut write_log, - &mut gas_meter, + &mut state, + &gas_meter, &tx_index, &outer_tx, &mut vp_cache, @@ -1178,12 +1158,11 @@ mod tests { /// execution, the execution is aborted. 
#[test] fn test_vp_memory_limiter_in_host_env() { - let mut storage = TestStorage::default(); - let addr = storage.address_gen.generate_address("rng seed"); - let write_log = WriteLog::default(); - let mut gas_meter = VpGasMeter::new_from_tx_meter( + let mut state = TestState::default(); + let addr = state.in_mem_mut().address_gen.generate_address("rng seed"); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let keys_changed = BTreeSet::new(); let verifiers = BTreeSet::new(); let tx_index = TxIndex::default(); @@ -1194,8 +1173,8 @@ mod tests { let code_len = (vp_read_key.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - storage.write(&key, vp_read_key).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_read_key).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); // Allocating `2^24` (16 MiB) for a value in storage that the tx // attempts to read should be above the memory limit and should @@ -1207,10 +1186,10 @@ mod tests { // Write the value that should be read by the tx into the storage. When // writing directly to storage, the value has to be encoded with // Borsh. - storage.write(&key, value.serialize_to_vec()).unwrap(); + state.write_bytes(&key, value.serialize_to_vec()).unwrap(); let tx_data = key.serialize_to_vec(); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); outer_tx.set_code(Code::new(vec![], None)); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); @@ -1219,9 +1198,8 @@ mod tests { &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache, @@ -1237,12 +1215,11 @@ mod tests { /// and hence returns `false`. 
#[test] fn test_vp_memory_limiter_in_host_env_inside_guest_calling_eval() { - let mut storage = TestStorage::default(); - let addr = storage.address_gen.generate_address("rng seed"); - let write_log = WriteLog::default(); - let mut gas_meter = VpGasMeter::new_from_tx_meter( + let mut state = TestState::default(); + let addr = state.in_mem_mut().address_gen.generate_address("rng seed"); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let keys_changed = BTreeSet::new(); let verifiers = BTreeSet::new(); let tx_index = TxIndex::default(); @@ -1254,8 +1231,8 @@ mod tests { let code_len = (vp_eval.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - storage.write(&key, vp_eval).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_eval).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); // This code will read value from the storage let vp_read_key = TestWasms::VpReadStorageKey.read_bytes(); // store the wasm code @@ -1263,8 +1240,8 @@ mod tests { let code_len = (vp_read_key.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&read_code_hash); let len_key = Key::wasm_code_len(&read_code_hash); - storage.write(&key, vp_read_key).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_read_key).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); // Allocating `2^24` (16 MiB) for a value in storage that the tx // attempts to read should be above the memory limit and should @@ -1276,10 +1253,10 @@ mod tests { // Write the value that should be read by the tx into the storage. When // writing directly to storage, the value has to be encoded with // Borsh. 
- storage.write(&key, value.serialize_to_vec()).unwrap(); + state.write_bytes(&key, value.serialize_to_vec()).unwrap(); let input = 2_usize.pow(23).serialize_to_vec(); - let mut tx = Tx::new(storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(vec![], None).add_serialized_data(input); let eval_vp = EvalVp { @@ -1287,7 +1264,7 @@ mod tests { input: tx, }; - let mut outer_tx = Tx::new(storage.chain_id.clone(), None); + let mut outer_tx = Tx::new(state.in_mem().chain_id.clone(), None); outer_tx.add_code(vec![], None).add_data(eval_vp); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); @@ -1296,9 +1273,8 @@ mod tests { &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache, @@ -1310,9 +1286,9 @@ mod tests { fn execute_tx_with_code(tx_code: Vec) -> Result> { let tx_data = vec![]; let tx_index = TxIndex::default(); - let storage = TestStorage::default(); - let mut write_log = WriteLog::default(); - let mut gas_meter = TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()); + let mut state = TestState::default(); + let gas_meter = + RefCell::new(TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into())); let (mut vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let (mut tx_cache, _) = @@ -1323,17 +1299,16 @@ mod tests { let code_len = (tx_code.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - write_log.write(&key, tx_code).unwrap(); - write_log.write(&len_key, code_len).unwrap(); + state.write_log_mut().write(&key, tx_code).unwrap(); + state.write_log_mut().write(&len_key, code_len).unwrap(); let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::from_hash(code_hash, None)); outer_tx.set_data(Data::new(tx_data)); tx( - &storage, - &mut write_log, - &mut gas_meter, + &mut state, + &gas_meter, &tx_index, &outer_tx, &mut vp_cache, 
@@ -1410,12 +1385,11 @@ mod tests { let outer_tx = Tx::from_type(TxType::Raw); let tx_index = TxIndex::default(); - let mut storage = TestStorage::default(); - let addr = storage.address_gen.generate_address("rng seed"); - let write_log = WriteLog::default(); - let mut gas_meter = VpGasMeter::new_from_tx_meter( + let mut state = TestState::default(); + let addr = state.in_mem_mut().address_gen.generate_address("rng seed"); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let keys_changed = BTreeSet::new(); let verifiers = BTreeSet::new(); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); @@ -1424,17 +1398,16 @@ mod tests { let code_len = (vp_code.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - storage.write(&key, vp_code).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_code).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); vp( code_hash, &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache, diff --git a/crates/parameters/src/lib.rs b/crates/parameters/src/lib.rs index 8fd77ad58f..b0c0a93410 100644 --- a/crates/parameters/src/lib.rs +++ b/crates/parameters/src/lib.rs @@ -3,15 +3,15 @@ pub mod storage; mod wasm_allowlist; use std::collections::BTreeMap; -use namada_core::types::address::{Address, InternalAddress}; -use namada_core::types::chain::ProposalBytes; -use namada_core::types::dec::Dec; -use namada_core::types::hash::Hash; -pub use namada_core::types::parameters::*; -use namada_core::types::storage::Key; -use namada_core::types::time::DurationSecs; -use namada_core::types::token; -use namada_storage::{self, ResultExt, StorageRead, StorageWrite}; +use namada_core::address::{Address, InternalAddress}; +use namada_core::chain::ProposalBytes; +use namada_core::dec::Dec; 
+use namada_core::hash::Hash; +pub use namada_core::parameters::*; +use namada_core::storage::Key; +use namada_core::time::DurationSecs; +use namada_core::token; +use namada_storage::{ResultExt, StorageRead, StorageWrite}; pub use storage::get_max_block_gas; use thiserror::Error; pub use wasm_allowlist::{is_tx_allowed, is_vp_allowed}; @@ -26,7 +26,7 @@ pub enum ReadError { #[error("Storage error: {0}")] StorageError(namada_storage::Error), #[error("Storage type error: {0}")] - StorageTypeError(namada_core::types::storage::Error), + StorageTypeError(namada_core::storage::Error), #[error("Protocol parameters are missing, they must be always set")] ParametersMissing, } diff --git a/crates/parameters/src/storage.rs b/crates/parameters/src/storage.rs index 7bacb2c6fa..abf3fa743f 100644 --- a/crates/parameters/src/storage.rs +++ b/crates/parameters/src/storage.rs @@ -1,7 +1,7 @@ //! Parameters storage -use namada_core::types::address::Address; -use namada_core::types::storage::{DbKeySeg, Key}; +use namada_core::address::Address; +use namada_core::storage::{DbKeySeg, Key}; use namada_macros::StorageKeys; use namada_storage::StorageRead; diff --git a/crates/parameters/src/wasm_allowlist.rs b/crates/parameters/src/wasm_allowlist.rs index e75b9b292b..c22b4b6fc1 100644 --- a/crates/parameters/src/wasm_allowlist.rs +++ b/crates/parameters/src/wasm_allowlist.rs @@ -1,5 +1,5 @@ -use namada_core::types::hash::Hash; -use namada_core::types::storage; +use namada_core::hash::Hash; +use namada_core::storage; use namada_storage::{Result, StorageRead}; use crate::storage::{ diff --git a/crates/proof_of_stake/src/epoched.rs b/crates/proof_of_stake/src/epoched.rs index 5db83af399..f218c74e5a 100644 --- a/crates/proof_of_stake/src/epoched.rs +++ b/crates/proof_of_stake/src/epoched.rs @@ -7,8 +7,7 @@ use std::marker::PhantomData; use std::{cmp, ops}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use namada_core::types::storage::{self, Epoch}; -use namada_storage; +use 
namada_core::storage::{self, Epoch}; use namada_storage::collections::lazy_map::{LazyMap, NestedMap}; use namada_storage::collections::{self, LazyCollection}; use namada_storage::{StorageRead, StorageWrite}; @@ -1085,10 +1084,10 @@ pub trait EpochOffset: #[cfg(test)] mod test { - use namada_core::types::address::testing::established_address_1; - use namada_core::types::dec::Dec; - use namada_core::types::{key, token}; - use namada_state::testing::TestWlStorage; + use namada_core::address::testing::established_address_1; + use namada_core::dec::Dec; + use namada_core::{key, token}; + use namada_state::testing::TestState; use test_log::test; use super::*; @@ -1392,8 +1391,8 @@ mod test { Ok(()) } - fn init_storage() -> namada_storage::Result { - let mut s = TestWlStorage::default(); + fn init_storage() -> namada_storage::Result { + let mut s = TestState::default(); let gov_params = namada_governance::parameters::GovernanceParameters::default(); gov_params.init_storage(&mut s)?; diff --git a/crates/proof_of_stake/src/error.rs b/crates/proof_of_stake/src/error.rs index 6516809575..c692e0decf 100644 --- a/crates/proof_of_stake/src/error.rs +++ b/crates/proof_of_stake/src/error.rs @@ -1,9 +1,9 @@ /// Custom error types use std::num::TryFromIntError; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; -use namada_core::types::storage::Epoch; +use namada_core::address::Address; +use namada_core::dec::Dec; +use namada_core::storage::Epoch; use thiserror::Error; use crate::rewards; diff --git a/crates/proof_of_stake/src/lib.rs b/crates/proof_of_stake/src/lib.rs index 6fb618db4f..4eaf8fc281 100644 --- a/crates/proof_of_stake/src/lib.rs +++ b/crates/proof_of_stake/src/lib.rs @@ -27,11 +27,13 @@ use std::cmp::{self}; use std::collections::{BTreeMap, BTreeSet, HashSet}; pub use error::*; -use namada_core::types::address::{Address, InternalAddress}; -use namada_core::types::dec::Dec; -use namada_core::types::key::common; -use 
namada_core::types::storage::BlockHeight; -pub use namada_core::types::storage::{Epoch, Key, KeySeg}; +use namada_core::address::{Address, InternalAddress}; +use namada_core::dec::Dec; +use namada_core::event::EmitEvents; +use namada_core::key::common; +use namada_core::storage::BlockHeight; +pub use namada_core::storage::{Epoch, Key, KeySeg}; +use namada_core::tendermint::abci::types::Misbehavior; use namada_storage::collections::lazy_map::{self, Collectable, LazyMap}; use namada_storage::{StorageRead, StorageWrite}; pub use namada_trans_token as token; @@ -162,7 +164,7 @@ where pub fn is_delegator( storage: &S, address: &Address, - epoch: Option, + epoch: Option, ) -> namada_storage::Result where S: StorageRead, @@ -312,8 +314,7 @@ where consensus_validator_set_handle() .at(&epoch) .iter(storage)? - .fold(Ok(token::Amount::zero()), |acc, entry| { - let acc = acc?; + .try_fold(token::Amount::zero(), |acc, entry| { let ( lazy_map::NestedSubKey::Data { key: amount, @@ -2484,13 +2485,9 @@ where #[cfg(any(test, feature = "testing"))] /// PoS related utility functions to help set up tests. 
pub mod test_utils { - use namada_storage; - use namada_storage::{StorageRead, StorageWrite}; use namada_trans_token::credit_tokens; use super::*; - use crate::parameters::PosParams; - use crate::storage::read_non_pos_owned_params; use crate::types::GenesisValidator; /// Helper function to initialize storage with PoS data @@ -2499,7 +2496,7 @@ pub mod test_utils { storage: &mut S, params: &PosParams, validators: impl Iterator, - current_epoch: namada_core::types::storage::Epoch, + current_epoch: namada_core::storage::Epoch, ) -> namada_storage::Result<()> where S: StorageRead + StorageWrite, @@ -2562,7 +2559,7 @@ pub mod test_utils { storage: &mut S, owned: OwnedPosParams, validators: impl Iterator + Clone, - current_epoch: namada_core::types::storage::Epoch, + current_epoch: namada_core::storage::Epoch, ) -> namada_storage::Result where S: StorageRead + StorageWrite, @@ -2768,3 +2765,109 @@ where } Ok(()) } + +/// Apply PoS updates for a block +pub fn finalize_block( + storage: &mut S, + _events: &mut impl EmitEvents, + is_new_epoch: bool, + validator_set_update_epoch: Epoch, + votes: Vec, + byzantine_validators: Vec, +) -> namada_storage::Result<()> +where + S: StorageWrite + StorageRead, +{ + let height = storage.get_block_height()?; + let current_epoch = storage.get_block_epoch()?; + let pos_params = storage::read_pos_params(storage)?; + + if is_new_epoch { + // Copy the new_epoch + pipeline_len - 1 validator set into + // new_epoch + pipeline_len + validator_set_update::copy_validator_sets_and_positions( + storage, + &pos_params, + current_epoch, + current_epoch + pos_params.pipeline_len, + )?; + + // Compute the total stake of the consensus validator set and record + // it in storage + compute_and_store_total_consensus_stake(storage, current_epoch)?; + } + + // Invariant: Has to be applied before `record_slashes_from_evidence` + // because it potentially needs to be able to read validator state from + // previous epoch and jailing validator removes the 
historical state + if !votes.is_empty() { + rewards::log_block_rewards( + storage, + votes.clone(), + height, + current_epoch, + is_new_epoch, + )?; + } + + // Invariant: This has to be applied after + // `copy_validator_sets_and_positions` and before `self.update_epoch`. + slashing::record_slashes_from_evidence( + storage, + byzantine_validators, + &pos_params, + current_epoch, + validator_set_update_epoch, + )?; + + // Invariant: This has to be applied after + // `copy_validator_sets_and_positions` if we're starting a new epoch + if is_new_epoch { + // Invariant: Process slashes before inflation as they may affect + // the rewards in the current epoch. + + // Process and apply slashes that have already been recorded for the + // current epoch + if let Err(err) = slashing::process_slashes(storage, current_epoch) { + tracing::error!( + "Error while processing slashes queued for epoch {}: {}", + current_epoch, + err + ); + panic!("Error while processing slashes"); + } + } + + // Consensus set liveness check + if !votes.is_empty() { + let vote_height = height.prev_height(); + let epoch_of_votes = + storage.get_pred_epochs()?.get_epoch(vote_height).expect( + "Should always find an epoch when looking up the vote height \ + before recording liveness data.", + ); + record_liveness_data( + storage, + &votes, + epoch_of_votes, + vote_height, + &pos_params, + )?; + } + + // Jail validators for inactivity + jail_for_liveness( + storage, + &pos_params, + current_epoch, + validator_set_update_epoch, + )?; + + if is_new_epoch { + // Prune liveness data from validators that are no longer in the + // consensus set + prune_liveness_data(storage, current_epoch)?; + } + + Ok(()) +} diff --git a/crates/proof_of_stake/src/parameters.rs b/crates/proof_of_stake/src/parameters.rs index 6062f11a5b..30d60b2b3e 100644 --- a/crates/proof_of_stake/src/parameters.rs +++ b/crates/proof_of_stake/src/parameters.rs @@ -3,10 +3,10 @@ use std::str::FromStr; use borsh::{BorshDeserialize, 
BorshSerialize}; -use namada_core::types::dec::Dec; -use namada_core::types::storage::Epoch; -use namada_core::types::token; -use namada_core::types::uint::Uint; +use namada_core::dec::Dec; +use namada_core::storage::Epoch; +use namada_core::token; +use namada_core::uint::Uint; use namada_governance::parameters::GovernanceParameters; use thiserror::Error; @@ -297,7 +297,6 @@ mod tests { /// Testing helpers #[cfg(any(test, feature = "testing"))] pub mod testing { - use namada_core::types::dec::Dec; use proptest::prelude::*; use super::*; diff --git a/crates/proof_of_stake/src/pos_queries.rs b/crates/proof_of_stake/src/pos_queries.rs index f76d4c6ed8..7496435081 100644 --- a/crates/proof_of_stake/src/pos_queries.rs +++ b/crates/proof_of_stake/src/pos_queries.rs @@ -1,10 +1,10 @@ //! Storage API for querying data about Proof-of-stake related //! data. This includes validator and epoch related data. -use namada_core::types::address::Address; -use namada_core::types::chain::ProposalBytes; -use namada_core::types::storage::{BlockHeight, Epoch}; -use namada_core::types::{key, token}; +use namada_core::address::Address; +use namada_core::chain::ProposalBytes; +use namada_core::storage::{BlockHeight, Epoch}; +use namada_core::{key, token}; use namada_parameters::storage::get_max_proposal_bytes_key; use namada_storage::collections::lazy_map::NestedSubKey; use namada_storage::StorageRead; @@ -82,9 +82,7 @@ pub struct PosQueriesHook<'db, S> { impl<'db, S> Clone for PosQueriesHook<'db, S> { fn clone(&self) -> Self { - Self { - storage: self.storage, - } + *self } } @@ -116,7 +114,7 @@ where let epoch = epoch.unwrap_or_else(|| self.storage.get_block_epoch().unwrap()); ConsensusValidators { - wl_storage: self.storage, + state: self.storage, validator_set: consensus_validator_set_handle().at(&epoch), } } @@ -259,7 +257,7 @@ pub struct ConsensusValidators<'db, S> where S: StorageRead, { - wl_storage: &'db S, + state: &'db S, validator_set: ConsensusValidatorSet, } @@ -273,7 +271,7 
@@ where &'this self, ) -> impl Iterator + 'db { self.validator_set - .iter(self.wl_storage) + .iter(self.state) .expect("Must be able to iterate over consensus validators") .map(|res| { let ( diff --git a/crates/proof_of_stake/src/queries.rs b/crates/proof_of_stake/src/queries.rs index 0b98810cba..361f82d258 100644 --- a/crates/proof_of_stake/src/queries.rs +++ b/crates/proof_of_stake/src/queries.rs @@ -4,12 +4,12 @@ use std::cmp; use std::collections::{BTreeMap, HashMap, HashSet}; use borsh::BorshDeserialize; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; -use namada_core::types::storage::Epoch; -use namada_core::types::token; +use namada_core::address::Address; +use namada_core::dec::Dec; +use namada_core::storage::Epoch; +use namada_core::token; use namada_storage::collections::lazy_map::{NestedSubKey, SubKey}; -use namada_storage::{self, StorageRead}; +use namada_storage::StorageRead; use crate::slashing::{find_validator_slashes, get_slashed_amount}; use crate::storage::{bond_handle, read_pos_params, unbond_handle}; diff --git a/crates/proof_of_stake/src/rewards.rs b/crates/proof_of_stake/src/rewards.rs index 78e7accdb4..e9c8ab72e7 100644 --- a/crates/proof_of_stake/src/rewards.rs +++ b/crates/proof_of_stake/src/rewards.rs @@ -2,12 +2,11 @@ use std::collections::{HashMap, HashSet}; -use namada_core::ledger::inflation; -use namada_core::types::address::{self, Address}; -use namada_core::types::dec::Dec; -use namada_core::types::storage::Epoch; -use namada_core::types::token::{self, Amount}; -use namada_core::types::uint::{Uint, I256}; +use namada_core::address::{self, Address}; +use namada_core::dec::Dec; +use namada_core::storage::{BlockHeight, Epoch}; +use namada_core::token::{self, Amount}; +use namada_core::uint::{Uint, I256}; use namada_parameters::storage as params_storage; use namada_storage::collections::lazy_map::NestedSubKey; use namada_storage::{ResultExt, StorageRead, StorageWrite}; @@ -19,12 +18,12 @@ use 
crate::storage::{ rewards_accumulator_handle, validator_commission_rate_handle, validator_rewards_products_handle, validator_state_handle, }; -use crate::token::credit_tokens; use crate::token::storage_key::minted_balance_key; +use crate::token::{credit_tokens, inflation}; use crate::types::{into_tm_voting_power, BondId, ValidatorState, VoteInfo}; use crate::{ bond_amounts_for_rewards, get_total_consensus_stake, staking_token_address, - storage_key, InflationError, PosParams, + storage, storage_key, InflationError, PosParams, }; /// This is equal to 0.01. @@ -122,10 +121,50 @@ impl PosRewardsCalculator { } } +/// Process the proposer and votes in the block to assign their PoS rewards. +pub(crate) fn log_block_rewards( + storage: &mut S, + votes: Vec, + height: BlockHeight, + current_epoch: Epoch, + new_epoch: bool, +) -> namada_storage::Result<()> +where + S: StorageWrite + StorageRead, +{ + // Read the block proposer of the previously committed block in storage + // (n-1 if we are in the process of finalizing n right now). + match storage::read_last_block_proposer_address(storage)? { + Some(proposer_address) => { + tracing::debug!("Found last block proposer: {proposer_address}"); + log_block_rewards_aux( + storage, + if new_epoch { + current_epoch.prev() + } else { + current_epoch + }, + &proposer_address, + votes, + )?; + } + None => { + if height > BlockHeight::default().next_height() { + tracing::error!( + "Can't find the last block proposer at height {height}" + ); + } else { + tracing::debug!("No last block proposer at height {height}"); + } + } + } + Ok(()) +} + /// Tally a running sum of the fraction of rewards owed to each validator in /// the consensus set. This is used to keep track of the rewards due to each /// consensus validator over the lifetime of an epoch. 
-pub fn log_block_rewards( +pub(crate) fn log_block_rewards_aux( storage: &mut S, epoch: impl Into, proposer_address: &Address, diff --git a/crates/proof_of_stake/src/slashing.rs b/crates/proof_of_stake/src/slashing.rs index 3d7361c3cf..513027fe49 100644 --- a/crates/proof_of_stake/src/slashing.rs +++ b/crates/proof_of_stake/src/slashing.rs @@ -4,10 +4,12 @@ use std::cmp::{self, Reverse}; use std::collections::{BTreeMap, BTreeSet, HashMap}; use borsh::BorshDeserialize; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; -use namada_core::types::storage::Epoch; -use namada_core::types::token; +use namada_core::address::Address; +use namada_core::dec::Dec; +use namada_core::key::tm_raw_hash_to_string; +use namada_core::storage::{BlockHeight, Epoch}; +use namada_core::tendermint::abci::types::{Misbehavior, MisbehaviorKind}; +use namada_core::token; use namada_storage::collections::lazy_map::{ Collectable, NestedMap, NestedSubKey, SubKey, }; @@ -30,10 +32,104 @@ use crate::types::{ use crate::validator_set_update::update_validator_set; use crate::{ fold_and_slash_redelegated_bonds, get_total_consensus_stake, - jail_validator, storage_key, EagerRedelegatedUnbonds, + jail_validator, storage, storage_key, types, EagerRedelegatedUnbonds, FoldRedelegatedBondsResult, OwnedPosParams, PosParams, }; +/// Apply PoS slashes from the evidence +pub(crate) fn record_slashes_from_evidence( + storage: &mut S, + byzantine_validators: Vec, + pos_params: &PosParams, + current_epoch: Epoch, + validator_set_update_epoch: Epoch, +) -> namada_storage::Result<()> +where + S: StorageWrite + StorageRead, +{ + if !byzantine_validators.is_empty() { + let pred_epochs = storage.get_pred_epochs()?; + for evidence in byzantine_validators { + // dbg!(&evidence); + tracing::info!("Processing evidence {evidence:?}."); + let evidence_height = u64::from(evidence.height); + let evidence_epoch = + match pred_epochs.get_epoch(BlockHeight(evidence_height)) { + Some(epoch) => epoch, + 
None => { + tracing::error!( + "Couldn't find epoch for evidence block height {}", + evidence_height + ); + continue; + } + }; + // Disregard evidences that should have already been processed + // at this time + if evidence_epoch + pos_params.slash_processing_epoch_offset() + - pos_params.cubic_slashing_window_length + <= current_epoch + { + tracing::info!( + "Skipping outdated evidence from epoch {evidence_epoch}" + ); + continue; + } + let slash_type = match evidence.kind { + MisbehaviorKind::DuplicateVote => { + types::SlashType::DuplicateVote + } + MisbehaviorKind::LightClientAttack => { + types::SlashType::LightClientAttack + } + MisbehaviorKind::Unknown => { + tracing::error!("Unknown evidence: {:#?}", evidence); + continue; + } + }; + let validator_raw_hash = + tm_raw_hash_to_string(evidence.validator.address); + let validator = match storage::find_validator_by_raw_hash( + storage, + &validator_raw_hash, + )? { + Some(validator) => validator, + None => { + tracing::error!( + "Cannot find validator's address from raw hash {}", + validator_raw_hash + ); + continue; + } + }; + // Check if we're gonna switch to a new epoch after a delay + tracing::info!( + "Slashing {} for {} in epoch {}, block height {} (current \ + epoch = {}, validator set update epoch = \ + {validator_set_update_epoch})", + validator, + slash_type, + evidence_epoch, + evidence_height, + current_epoch + ); + if let Err(err) = slash( + storage, + pos_params, + current_epoch, + evidence_epoch, + evidence_height, + slash_type, + &validator, + validator_set_update_epoch, + ) { + tracing::error!("Error in slashing: {}", err); + } + } + } + Ok(()) +} + /// Record a slash for a misbehavior that has been received from Tendermint and /// then jail the validator, removing it from the validator set. The slash rate /// will be computed at a later epoch. 
@@ -1101,10 +1197,8 @@ where ); let processing_epoch = epoch + params.slash_processing_epoch_offset(); let slashes = enqueued_slashes_handle().at(&processing_epoch); - let infracting_stake = slashes.iter(storage)?.fold( - Ok(Dec::zero()), - |acc: namada_storage::Result, res| { - let acc = acc?; + let infracting_stake = + slashes.iter(storage)?.try_fold(Dec::zero(), |acc, res| { let ( NestedSubKey::Data { key: validator, @@ -1118,9 +1212,10 @@ where // tracing::debug!("Val {} stake: {}", &validator, // validator_stake); - Ok(acc + Dec::from(validator_stake)) - }, - )?; + Ok::( + acc + Dec::from(validator_stake), + ) + })?; sum_vp_fraction += infracting_stake / consensus_stake; } let cubic_rate = diff --git a/crates/proof_of_stake/src/storage.rs b/crates/proof_of_stake/src/storage.rs index 36c0af84ac..9cae1168a9 100644 --- a/crates/proof_of_stake/src/storage.rs +++ b/crates/proof_of_stake/src/storage.rs @@ -4,11 +4,11 @@ use std::collections::{BTreeSet, HashSet}; use namada_account::protocol_pk_key; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; -use namada_core::types::key::{common, tm_consensus_key_raw_hash}; -use namada_core::types::storage::Epoch; -use namada_core::types::token; +use namada_core::address::Address; +use namada_core::dec::Dec; +use namada_core::key::{common, tm_consensus_key_raw_hash}; +use namada_core::storage::Epoch; +use namada_core::token; use namada_governance::storage::get_max_proposal_period; use namada_storage::collections::lazy_map::NestedSubKey; use namada_storage::collections::{LazyCollection, LazySet}; @@ -391,7 +391,7 @@ where pub fn read_validator_deltas_value( storage: &S, validator: &Address, - epoch: &namada_core::types::storage::Epoch, + epoch: &namada_core::storage::Epoch, ) -> namada_storage::Result> where S: StorageRead, @@ -407,7 +407,7 @@ pub fn read_validator_stake( storage: &S, params: &PosParams, validator: &Address, - epoch: namada_core::types::storage::Epoch, + epoch: 
namada_core::storage::Epoch, ) -> namada_storage::Result where S: StorageRead, @@ -429,7 +429,7 @@ pub fn update_validator_deltas( params: &OwnedPosParams, validator: &Address, delta: token::Change, - current_epoch: namada_core::types::storage::Epoch, + current_epoch: namada_core::storage::Epoch, offset_opt: Option, ) -> namada_storage::Result<()> where @@ -453,7 +453,7 @@ where pub fn read_total_stake( storage: &S, params: &PosParams, - epoch: namada_core::types::storage::Epoch, + epoch: namada_core::storage::Epoch, ) -> namada_storage::Result where S: StorageRead, @@ -472,7 +472,7 @@ where /// Read all addresses from consensus validator set. pub fn read_consensus_validator_set_addresses( storage: &S, - epoch: namada_core::types::storage::Epoch, + epoch: namada_core::storage::Epoch, ) -> namada_storage::Result> where S: StorageRead, @@ -487,7 +487,7 @@ where /// Read all addresses from below-capacity validator set. pub fn read_below_capacity_validator_set_addresses( storage: &S, - epoch: namada_core::types::storage::Epoch, + epoch: namada_core::storage::Epoch, ) -> namada_storage::Result> where S: StorageRead, @@ -502,7 +502,7 @@ where /// Read all addresses from the below-threshold set pub fn read_below_threshold_validator_set_addresses( storage: &S, - epoch: namada_core::types::storage::Epoch, + epoch: namada_core::storage::Epoch, ) -> namada_storage::Result> where S: StorageRead, @@ -524,7 +524,7 @@ where /// Read all addresses from consensus validator set with their stake. 
pub fn read_consensus_validator_set_addresses_with_stake( storage: &S, - epoch: namada_core::types::storage::Epoch, + epoch: namada_core::storage::Epoch, ) -> namada_storage::Result> where S: StorageRead, @@ -554,7 +554,7 @@ where /// Count the number of consensus validators pub fn get_num_consensus_validators( storage: &S, - epoch: namada_core::types::storage::Epoch, + epoch: namada_core::storage::Epoch, ) -> namada_storage::Result where S: StorageRead, @@ -568,7 +568,7 @@ where /// Read all addresses from below-capacity validator set with their stake. pub fn read_below_capacity_validator_set_addresses_with_stake( storage: &S, - epoch: namada_core::types::storage::Epoch, + epoch: namada_core::storage::Epoch, ) -> namada_storage::Result> where S: StorageRead, @@ -598,7 +598,7 @@ where /// Read all validator addresses. pub fn read_all_validator_addresses( storage: &S, - epoch: namada_core::types::storage::Epoch, + epoch: namada_core::storage::Epoch, ) -> namada_storage::Result> where S: StorageRead, @@ -615,7 +615,7 @@ pub fn update_total_deltas( storage: &mut S, params: &OwnedPosParams, delta: token::Change, - current_epoch: namada_core::types::storage::Epoch, + current_epoch: namada_core::storage::Epoch, offset_opt: Option, ) -> namada_storage::Result<()> where diff --git a/crates/proof_of_stake/src/storage_key.rs b/crates/proof_of_stake/src/storage_key.rs index 6135468a95..4efb24a06b 100644 --- a/crates/proof_of_stake/src/storage_key.rs +++ b/crates/proof_of_stake/src/storage_key.rs @@ -1,7 +1,7 @@ //! Proof-of-Stake storage keys and storage integration. -use namada_core::types::address::Address; -use namada_core::types::storage::{DbKeySeg, Epoch, Key, KeySeg}; +use namada_core::address::Address; +use namada_core::storage::{DbKeySeg, Epoch, Key, KeySeg}; use namada_storage::collections::{lazy_map, lazy_vec}; use super::ADDRESS; @@ -61,7 +61,7 @@ const LIVENESS_MISSED_VOTES_SUM: &str = "sum_missed_votes"; /// Is the given key a PoS storage key? 
pub fn is_pos_key(key: &Key) -> bool { - match &key.segments.get(0) { + match &key.segments.first() { Some(DbKeySeg::AddressSeg(addr)) => addr == &ADDRESS, _ => false, } diff --git a/crates/proof_of_stake/src/tests/helpers.rs b/crates/proof_of_stake/src/tests/helpers.rs index d66b64c504..f199c13e85 100644 --- a/crates/proof_of_stake/src/tests/helpers.rs +++ b/crates/proof_of_stake/src/tests/helpers.rs @@ -1,14 +1,14 @@ use std::cmp::max; use std::ops::Range; -use namada_core::types::address::testing::address_from_simple_seed; -use namada_core::types::dec::Dec; -use namada_core::types::key::testing::common_sk_from_simple_seed; -use namada_core::types::key::{self, RefTo}; -use namada_core::types::storage::Epoch; -use namada_core::types::token; -use namada_core::types::token::testing::arb_amount_non_zero_ceiled; -use namada_state::testing::TestWlStorage; +use namada_core::address::testing::address_from_simple_seed; +use namada_core::dec::Dec; +use namada_core::key::testing::common_sk_from_simple_seed; +use namada_core::key::{self, RefTo}; +use namada_core::storage::Epoch; +use namada_core::token; +use namada_core::token::testing::arb_amount_non_zero_ceiled; +use namada_state::testing::TestState; use proptest::prop_oneof; use proptest::strategy::{Just, Strategy}; @@ -48,7 +48,7 @@ pub fn test_slashes_with_unbonding_params() } pub fn get_tendermint_set_updates( - s: &TestWlStorage, + s: &TestState, params: &PosParams, Epoch(epoch): Epoch, ) -> Vec { @@ -61,9 +61,9 @@ pub fn get_tendermint_set_updates( } /// Advance to the next epoch. Returns the new epoch. 
-pub fn advance_epoch(s: &mut TestWlStorage, params: &PosParams) -> Epoch { - s.storage.block.epoch = s.storage.block.epoch.next(); - let current_epoch = s.storage.block.epoch; +pub fn advance_epoch(s: &mut TestState, params: &PosParams) -> Epoch { + s.in_mem_mut().block.epoch = s.in_mem().block.epoch.next(); + let current_epoch = s.in_mem().block.epoch; compute_and_store_total_consensus_stake(s, current_epoch).unwrap(); copy_validator_sets_and_positions( s, diff --git a/crates/proof_of_stake/src/tests/state_machine.rs b/crates/proof_of_stake/src/tests/state_machine.rs index 3dc34c36fa..7bf14ce6d5 100644 --- a/crates/proof_of_stake/src/tests/state_machine.rs +++ b/crates/proof_of_stake/src/tests/state_machine.rs @@ -6,14 +6,14 @@ use std::ops::Deref; use assert_matches::assert_matches; use itertools::Itertools; -use namada_core::types::address::{self, Address}; -use namada_core::types::dec::Dec; -use namada_core::types::key; -use namada_core::types::key::common::PublicKey; -use namada_core::types::storage::Epoch; -use namada_core::types::token::Change; +use namada_core::address::{self, Address}; +use namada_core::dec::Dec; +use namada_core::key; +use namada_core::key::common::PublicKey; +use namada_core::storage::Epoch; +use namada_core::token::Change; use namada_governance::parameters::GovernanceParameters; -use namada_state::testing::TestWlStorage; +use namada_state::testing::TestState; use namada_storage::collections::lazy_map::{ Collectable, NestedSubKey, SubKey, }; @@ -163,7 +163,7 @@ struct AbstractPosState { #[derive(Debug)] struct ConcretePosState { /// Storage - contains all the PoS state - s: TestWlStorage, + s: TestState, } /// State machine transitions @@ -225,7 +225,7 @@ impl StateMachineTest for ConcretePosState { .map(|val| &val.address) .collect::>() ); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); initial_state.gov_params.init_storage(&mut s).unwrap(); crate::test_utils::test_init_genesis( &mut s, @@ -245,7 +245,7 @@ 
impl StateMachineTest for ConcretePosState { let params = crate::read_pos_params(&state.s).unwrap(); let pos_balance = read_balance( &state.s, - &state.s.storage.native_token, + &state.s.in_mem().native_token, &crate::ADDRESS, ) .unwrap(); @@ -256,7 +256,7 @@ impl StateMachineTest for ConcretePosState { advance_epoch(&mut state.s, ¶ms); // Need to apply some slashing - let current_epoch = state.s.storage.block.epoch; + let current_epoch = state.s.in_mem().block.epoch; crate::slashing::process_slashes(&mut state.s, current_epoch) .unwrap(); @@ -771,7 +771,7 @@ impl StateMachineTest for ConcretePosState { impl ConcretePosState { fn current_epoch(&self) -> Epoch { - self.s.storage.block.epoch + self.s.in_mem().block.epoch } fn check_next_epoch_post_conditions(&self, params: &PosParams) { @@ -1443,7 +1443,7 @@ impl ConcretePosState { params: &PosParams, validator: &Address, ) { - let current_epoch = self.s.storage.block.epoch; + let current_epoch = self.s.in_mem().block.epoch; // Make sure the validator is not in either set until the pipeline epoch for epoch in current_epoch.iter_range(params.pipeline_len) { diff --git a/crates/proof_of_stake/src/tests/state_machine_v2.rs b/crates/proof_of_stake/src/tests/state_machine_v2.rs index 872625f9d5..9a2d1234d7 100644 --- a/crates/proof_of_stake/src/tests/state_machine_v2.rs +++ b/crates/proof_of_stake/src/tests/state_machine_v2.rs @@ -7,14 +7,14 @@ use std::{cmp, mem}; use assert_matches::assert_matches; use derivative::Derivative; use itertools::Itertools; -use namada_core::types::address::{self, Address}; -use namada_core::types::dec::Dec; -use namada_core::types::key; -use namada_core::types::key::common::PublicKey; -use namada_core::types::storage::Epoch; -use namada_core::types::token::Change; +use namada_core::address::{self, Address}; +use namada_core::dec::Dec; +use namada_core::key; +use namada_core::key::common::PublicKey; +use namada_core::storage::Epoch; +use namada_core::token::Change; use 
namada_governance::parameters::GovernanceParameters; -use namada_state::testing::TestWlStorage; +use namada_state::testing::TestState; use namada_storage::collections::lazy_map::{NestedSubKey, SubKey}; use namada_storage::StorageRead; use proptest::prelude::*; @@ -529,7 +529,7 @@ impl AbstractPosState { !is_withdrawable }) } - records.withdrawn.extend(to_store.into_iter()); + records.withdrawn.extend(to_store); } /// Get or insert default mutable records @@ -1867,7 +1867,7 @@ impl Unbond { #[derivative(Debug)] struct ConcretePosState { /// Storage - contains all the PoS state - s: TestWlStorage, + s: TestState, /// Last reference state in debug format to print changes after transitions #[derivative(Debug = "ignore")] last_state_diff: DbgPrintDiff, @@ -1937,7 +1937,7 @@ impl StateMachineTest for ConcretePosState { .map(|val| &val.address) .collect::>() ); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); initial_state.gov_params.init_storage(&mut s).unwrap(); crate::test_utils::init_genesis_helper( &mut s, @@ -1973,7 +1973,7 @@ impl StateMachineTest for ConcretePosState { let params = crate::read_pos_params(&state.s).unwrap(); let pos_balance = read_balance( &state.s, - &state.s.storage.native_token, + &state.s.in_mem().native_token, &crate::ADDRESS, ) .unwrap(); @@ -1984,7 +1984,7 @@ impl StateMachineTest for ConcretePosState { advance_epoch(&mut state.s, ¶ms); // Need to apply some slashing - let current_epoch = state.s.storage.block.epoch; + let current_epoch = state.s.in_mem().block.epoch; crate::slashing::process_slashes(&mut state.s, current_epoch) .unwrap(); @@ -2724,7 +2724,7 @@ impl StateMachineTest for ConcretePosState { impl ConcretePosState { fn current_epoch(&self) -> Epoch { - self.s.storage.block.epoch + self.s.in_mem().block.epoch } fn check_next_epoch_post_conditions(&self, params: &PosParams) { @@ -3119,7 +3119,7 @@ impl ConcretePosState { params: &PosParams, validator: &Address, ) { - let current_epoch = 
self.s.storage.block.epoch; + let current_epoch = self.s.in_mem().block.epoch; // Make sure the validator is not in either set until the pipeline epoch for epoch in current_epoch.iter_range(params.pipeline_len) { diff --git a/crates/proof_of_stake/src/tests/test_helper_fns.rs b/crates/proof_of_stake/src/tests/test_helper_fns.rs index 5148224412..98c2ac37b0 100644 --- a/crates/proof_of_stake/src/tests/test_helper_fns.rs +++ b/crates/proof_of_stake/src/tests/test_helper_fns.rs @@ -1,12 +1,12 @@ use std::collections::{BTreeMap, BTreeSet}; -use namada_core::types::address::testing::{ +use namada_core::address::testing::{ established_address_1, established_address_2, established_address_3, }; -use namada_core::types::dec::Dec; -use namada_core::types::storage::{Epoch, Key}; -use namada_core::types::token; -use namada_state::testing::TestWlStorage; +use namada_core::dec::Dec; +use namada_core::storage::{Epoch, Key}; +use namada_core::token; +use namada_state::testing::TestState; use namada_storage::collections::lazy_map::NestedMap; use namada_storage::collections::LazyCollection; @@ -35,7 +35,7 @@ use crate::{ /// `iterateBondsUpToAmountTest` #[test] fn test_find_bonds_to_remove() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let gov_params = namada_governance::parameters::GovernanceParameters::default(); gov_params.init_storage(&mut storage).unwrap(); @@ -119,7 +119,7 @@ fn test_find_bonds_to_remove() { /// `computeModifiedRedelegationTest` #[test] fn test_compute_modified_redelegation() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let validator1 = established_address_1(); let validator2 = established_address_2(); let owner = established_address_3(); @@ -293,7 +293,7 @@ fn test_compute_modified_redelegation() { /// `computeBondAtEpochTest` #[test] fn test_compute_bond_at_epoch() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = 
OwnedPosParams { pipeline_len: 2, unbonding_len: 4, @@ -443,7 +443,7 @@ fn test_compute_bond_at_epoch() { /// `computeSlashBondAtEpochTest` #[test] fn test_compute_slash_bond_at_epoch() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { pipeline_len: 2, unbonding_len: 4, @@ -541,7 +541,7 @@ fn test_compute_slash_bond_at_epoch() { /// `computeNewRedelegatedUnbondsTest` #[test] fn test_compute_new_redelegated_unbonds() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let alice = established_address_1(); let bob = established_address_2(); @@ -811,7 +811,7 @@ fn test_compute_slashable_amount() { /// `foldAndSlashRedelegatedBondsMapTest` #[test] fn test_fold_and_slash_redelegated_bonds() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() @@ -910,7 +910,7 @@ fn test_fold_and_slash_redelegated_bonds() { /// `slashRedelegationTest` #[test] fn test_slash_redelegation() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() @@ -1098,7 +1098,7 @@ fn test_slash_redelegation() { /// `slashValidatorRedelegationTest` #[test] fn test_slash_validator_redelegation() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() @@ -1280,7 +1280,7 @@ fn test_slash_validator_redelegation() { /// `slashValidatorTest` #[test] fn test_slash_validator() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() @@ -1680,7 +1680,7 @@ fn test_slash_validator() { /// `computeAmountAfterSlashingUnbondTest` #[test] fn test_compute_amount_after_slashing_unbond() { - let mut storage = 
TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() @@ -1798,7 +1798,7 @@ fn test_compute_amount_after_slashing_unbond() { /// `computeAmountAfterSlashingWithdrawTest` #[test] fn test_compute_amount_after_slashing_withdraw() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() @@ -1927,9 +1927,9 @@ fn test_compute_amount_after_slashing_withdraw() { /// SM test case 1 from Brent #[test] fn test_from_sm_case_1() { - use namada_core::types::address::testing::established_address_4; + use namada_core::address::testing::established_address_4; - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let gov_params = namada_governance::parameters::GovernanceParameters::default(); gov_params.init_storage(&mut storage).unwrap(); diff --git a/crates/proof_of_stake/src/tests/test_pos.rs b/crates/proof_of_stake/src/tests/test_pos.rs index 45b4f3a980..afd40941ed 100644 --- a/crates/proof_of_stake/src/tests/test_pos.rs +++ b/crates/proof_of_stake/src/tests/test_pos.rs @@ -3,15 +3,13 @@ use std::collections::{BTreeMap, HashSet}; use assert_matches::assert_matches; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; -use namada_core::types::key::testing::{ - common_sk_from_simple_seed, gen_keypair, -}; -use namada_core::types::key::RefTo; -use namada_core::types::storage::{BlockHeight, Epoch}; -use namada_core::types::{address, key}; -use namada_state::testing::TestWlStorage; +use namada_core::address::Address; +use namada_core::dec::Dec; +use namada_core::key::testing::{common_sk_from_simple_seed, gen_keypair}; +use namada_core::key::RefTo; +use namada_core::storage::{BlockHeight, Epoch}; +use namada_core::{address, key}; +use namada_state::testing::TestState; use namada_storage::collections::lazy_map::Collectable; use 
namada_storage::StorageRead; use proptest::prelude::*; @@ -24,7 +22,7 @@ use crate::parameters::testing::arb_pos_params; use crate::parameters::OwnedPosParams; use crate::queries::bonds_and_unbonds; use crate::rewards::{ - log_block_rewards, update_rewards_products_and_mint_inflation, + log_block_rewards_aux, update_rewards_products_and_mint_inflation, PosRewardsCalculator, }; use crate::slashing::{process_slashes, slash}; @@ -120,18 +118,18 @@ proptest! { } proptest! { - // Generate arb valid input for `test_log_block_rewards_aux` + // Generate arb valid input for `test_log_block_rewards_aux_aux` #![proptest_config(Config { cases: 1, .. Config::default() })] #[test] - fn test_log_block_rewards( + fn test_log_block_rewards_aux( genesis_validators in arb_genesis_validators(4..10, None), params in arb_pos_params(Some(5)) ) { - test_log_block_rewards_aux(genesis_validators, params) + test_log_block_rewards_aux_aux(genesis_validators, params) } } @@ -205,8 +203,8 @@ fn test_test_init_genesis_aux( // "Test inputs: {params:?}, {start_epoch}, genesis validators: \ // {validators:#?}" // ); - let mut s = TestWlStorage::default(); - s.storage.block.epoch = start_epoch; + let mut s = TestState::default(); + s.in_mem_mut().block.epoch = start_epoch; validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); let params = test_init_genesis( @@ -289,11 +287,11 @@ fn test_bonds_aux(params: OwnedPosParams, validators: Vec) { // params.unbonding_len = 4; // println!("\nTest inputs: {params:?}, genesis validators: // {validators:#?}"); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Genesis - let start_epoch = s.storage.block.epoch; - let mut current_epoch = s.storage.block.epoch; + let start_epoch = s.in_mem().block.epoch; + let mut current_epoch = s.in_mem().block.epoch; let params = test_init_genesis( &mut s, params, @@ -563,7 +561,7 @@ fn test_bonds_aux(params: OwnedPosParams, validators: Vec) { let unbonded_genesis_self_bond = amount_self_unbond - 
amount_self_bond != token::Amount::zero(); - let self_unbond_epoch = s.storage.block.epoch; + let self_unbond_epoch = s.in_mem().block.epoch; unbond_tokens( &mut s, @@ -825,7 +823,7 @@ fn test_unjail_validator_aux( ) { // println!("\nTest inputs: {params:?}, genesis validators: // {validators:#?}"); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Find the validator with the most stake and 100x his stake to keep the // cubic slash rate small @@ -838,7 +836,7 @@ fn test_unjail_validator_aux( // let val_tokens = validators[num_vals - 2].tokens; // Genesis - let mut current_epoch = s.storage.block.epoch; + let mut current_epoch = s.in_mem().block.epoch; let params = test_init_genesis( &mut s, params, @@ -962,14 +960,14 @@ fn test_unjail_validator_aux( } fn test_unslashed_bond_amount_aux(validators: Vec) { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() }; // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, @@ -1143,7 +1141,7 @@ fn test_unslashed_bond_amount_aux(validators: Vec) { } } -fn test_log_block_rewards_aux( +fn test_log_block_rewards_aux_aux( validators: Vec, params: OwnedPosParams, ) { @@ -1155,9 +1153,9 @@ fn test_log_block_rewards_aux( .map(|v| (&v.address, v.tokens.to_string_native())) .collect::>() ); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Init genesis - let current_epoch = s.storage.block.epoch; + let current_epoch = s.in_mem().block.epoch; let params = test_init_genesis( &mut s, params, @@ -1231,7 +1229,7 @@ fn test_log_block_rewards_aux( }; let (votes, signing_stake, non_voters) = prep_votes(current_epoch); - log_block_rewards( + log_block_rewards_aux( &mut s, current_epoch, &proposer_address, @@ -1374,9 +1372,9 @@ fn 
test_update_rewards_products_aux(validators: Vec) { .map(|v| (&v.address, v.tokens.to_string_native())) .collect::>() ); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Init genesis - let current_epoch = s.storage.block.epoch; + let current_epoch = s.in_mem().block.epoch; let params = OwnedPosParams::default(); let params = test_init_genesis( &mut s, @@ -1466,10 +1464,10 @@ fn test_consensus_key_change_aux(validators: Vec) { // println!("\nTest inputs: {params:?}, genesis validators: // {validators:#?}"); - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, @@ -1544,7 +1542,7 @@ fn test_consensus_key_change_aux(validators: Vec) { change_consensus_key(&mut storage, &validator, &ck_3, current_epoch) .unwrap(); - let staking_token = storage.storage.native_token.clone(); + let staking_token = storage.in_mem().native_token.clone(); let amount_del = token::Amount::native_whole(5); credit_tokens(&mut storage, &staking_token, &validator, amount_del) .unwrap(); @@ -1593,14 +1591,14 @@ fn test_is_delegator_aux(mut validators: Vec) { let validator1 = validators[0].address.clone(); let validator2 = validators[1].address.clone(); - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() }; // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, @@ -1701,13 +1699,13 @@ fn test_jail_for_liveness_aux(validators: Vec) { let missed_votes = 1_u64; // Open 2 storages - let mut storage = TestWlStorage::default(); - let mut storage_clone = TestWlStorage::default(); + let mut storage = TestState::default(); + let mut storage_clone = 
TestState::default(); // Apply the same changes to each storage for s in [&mut storage, &mut storage_clone] { // Genesis - let current_epoch = s.storage.block.epoch; + let current_epoch = s.in_mem().block.epoch; let jail_epoch = current_epoch.next(); let params = test_init_genesis( s, @@ -1741,5 +1739,8 @@ fn test_jail_for_liveness_aux(validators: Vec) { } // Assert that the changes from `jail_for_liveness` are the same - pretty_assertions::assert_eq!(&storage.write_log, &storage_clone.write_log); + pretty_assertions::assert_eq!( + &storage.write_log(), + &storage_clone.write_log() + ); } diff --git a/crates/proof_of_stake/src/tests/test_slash_and_redel.rs b/crates/proof_of_stake/src/tests/test_slash_and_redel.rs index e9118b1472..34a9c574d8 100644 --- a/crates/proof_of_stake/src/tests/test_slash_and_redel.rs +++ b/crates/proof_of_stake/src/tests/test_slash_and_redel.rs @@ -2,11 +2,11 @@ use std::ops::Deref; use std::str::FromStr; use assert_matches::assert_matches; -use namada_core::types::address; -use namada_core::types::dec::Dec; -use namada_core::types::storage::{BlockHeight, Epoch}; -use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; -use namada_state::testing::TestWlStorage; +use namada_core::address; +use namada_core::dec::Dec; +use namada_core::storage::{BlockHeight, Epoch}; +use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; +use namada_state::testing::TestState; use namada_storage::collections::lazy_map::Collectable; use namada_storage::StorageRead; use proptest::prelude::*; @@ -66,14 +66,14 @@ fn test_simple_redelegation_aux( let src_validator = validators[0].address.clone(); let dest_validator = validators[1].address.clone(); - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() }; // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut 
storage, params, @@ -293,7 +293,7 @@ fn test_slashes_with_unbonding_aux( params.unbonding_len = 4; // println!("\nTest inputs: {params:?}, genesis validators: // {validators:#?}"); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Find the validator with the least stake to avoid the cubic slash rate // going to 100% @@ -305,8 +305,8 @@ fn test_slashes_with_unbonding_aux( let val_tokens = validator.tokens; // Genesis - // let start_epoch = s.storage.block.epoch; - let mut current_epoch = s.storage.block.epoch; + // let start_epoch = s.in_mem().block.epoch; + let mut current_epoch = s.in_mem().block.epoch; let params = test_init_genesis( &mut s, params, @@ -461,7 +461,7 @@ fn test_redelegation_with_slashing_aux( let src_validator = validators[0].address.clone(); let dest_validator = validators[1].address.clone(); - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, // Avoid empty consensus set by removing the threshold @@ -470,7 +470,7 @@ fn test_redelegation_with_slashing_aux( }; // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, @@ -682,14 +682,14 @@ fn test_chain_redelegations_aux(mut validators: Vec) { let dest_validator_2 = validators[2].address.clone(); let _init_stake_dest_2 = validators[2].tokens; - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() }; // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, @@ -1073,10 +1073,10 @@ fn test_overslashing_aux(mut validators: Vec) { // println!("\nTest inputs: {params:?}, genesis validators: // {validators:#?}"); - let mut storage = TestWlStorage::default(); + 
let mut storage = TestState::default(); // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, @@ -1087,7 +1087,7 @@ fn test_overslashing_aux(mut validators: Vec) { storage.commit_block().unwrap(); // Get a delegator with some tokens - let staking_token = storage.storage.native_token.clone(); + let staking_token = storage.in_mem().native_token.clone(); let delegator = address::testing::gen_implicit_address(); let amount_del = token::Amount::native_whole(5); credit_tokens(&mut storage, &staking_token, &delegator, amount_del) @@ -1243,7 +1243,7 @@ proptest! { } fn test_slashed_bond_amount_aux(validators: Vec) { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, validator_stake_threshold: token::Amount::zero(), @@ -1260,7 +1260,7 @@ fn test_slashed_bond_amount_aux(validators: Vec) { validators[0].tokens = (init_tot_stake - val1_init_stake) / 30; // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, diff --git a/crates/proof_of_stake/src/tests/test_validator.rs b/crates/proof_of_stake/src/tests/test_validator.rs index 4ddb17c7c6..da7b44a86d 100644 --- a/crates/proof_of_stake/src/tests/test_validator.rs +++ b/crates/proof_of_stake/src/tests/test_validator.rs @@ -1,15 +1,15 @@ use std::cmp::min; -use namada_core::types::address::testing::arb_established_address; -use namada_core::types::address::{self, Address, EstablishedAddressGen}; -use namada_core::types::dec::Dec; -use namada_core::types::key::testing::{ +use namada_core::address::testing::arb_established_address; +use namada_core::address::{self, Address, EstablishedAddressGen}; +use namada_core::dec::Dec; +use namada_core::key::testing::{ arb_common_keypair, common_sk_from_simple_seed, }; 
-use namada_core::types::key::{self, common, RefTo}; -use namada_core::types::storage::Epoch; -use namada_core::types::token; -use namada_state::testing::TestWlStorage; +use namada_core::key::{self, common, RefTo}; +use namada_core::storage::Epoch; +use namada_core::token; +use namada_state::testing::TestState; use namada_storage::collections::lazy_map; use proptest::prelude::*; use proptest::test_runner::Config; @@ -78,10 +78,10 @@ fn test_become_validator_aux( // validators: {validators:#?}" // ); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Genesis - let mut current_epoch = s.storage.block.epoch; + let mut current_epoch = s.in_mem().block.epoch; let params = test_init_genesis( &mut s, params, @@ -273,7 +273,7 @@ fn test_become_validator_aux( #[test] fn test_validator_raw_hash() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let address = address::testing::established_address_1(); let consensus_sk = key::testing::keypair_1(); let consensus_pk = consensus_sk.to_public(); @@ -293,7 +293,7 @@ fn test_validator_raw_hash() { #[test] fn test_validator_sets() { - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Only 3 consensus validator slots let params = OwnedPosParams { max_validator_slots: 3, @@ -388,7 +388,7 @@ fn test_validator_sets() { .unwrap(); // A helper to insert a non-genesis validator - let insert_validator = |s: &mut TestWlStorage, + let insert_validator = |s: &mut TestState, addr, pk: &common::PublicKey, stake: token::Amount, @@ -976,7 +976,7 @@ fn test_validator_sets() { /// with 0 voting power, because it wasn't it its set before #[test] fn test_validator_sets_swap() { - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Only 2 consensus validator slots let params = OwnedPosParams { max_validator_slots: 2, @@ -1066,7 +1066,7 @@ fn test_validator_sets_swap() { .unwrap(); // A helper to insert a non-genesis validator - let 
insert_validator = |s: &mut TestWlStorage, + let insert_validator = |s: &mut TestState, addr, pk: &common::PublicKey, stake: token::Amount, @@ -1281,8 +1281,8 @@ fn test_purge_validator_information_aux(validators: Vec) { ..Default::default() }; - let mut s = TestWlStorage::default(); - let mut current_epoch = s.storage.block.epoch; + let mut s = TestState::default(); + let mut current_epoch = s.in_mem().block.epoch; // Genesis let gov_params = namada_governance::parameters::GovernanceParameters { @@ -1306,7 +1306,7 @@ fn test_purge_validator_information_aux(validators: Vec) { let validator_positions = validator_set_positions_handle(); let all_validator_addresses = validator_addresses_handle(); - let check_is_data = |storage: &TestWlStorage, start: Epoch, end: Epoch| { + let check_is_data = |storage: &TestState, start: Epoch, end: Epoch| { for ep in Epoch::iter_bounds_inclusive(start, end) { assert!(!consensus_val_set.at(&ep).is_empty(storage).unwrap()); // assert!(!below_cap_val_set.at(&ep).is_empty(storage). 
@@ -1325,7 +1325,7 @@ fn test_purge_validator_information_aux(validators: Vec) { for _ in 0..default_past_epochs { current_epoch = advance_epoch(&mut s, ¶ms); } - assert_eq!(s.storage.block.epoch.0, default_past_epochs); + assert_eq!(s.in_mem().block.epoch.0, default_past_epochs); assert_eq!(current_epoch.0, default_past_epochs); check_is_data( diff --git a/crates/proof_of_stake/src/types/mod.rs b/crates/proof_of_stake/src/types/mod.rs index b2a0d176ad..f0567a3aae 100644 --- a/crates/proof_of_stake/src/types/mod.rs +++ b/crates/proof_of_stake/src/types/mod.rs @@ -4,18 +4,17 @@ mod rev_order; use core::fmt::Debug; use std::collections::{BTreeMap, HashMap}; -use std::convert::TryFrom; use std::fmt::Display; use std::hash::Hash; use std::ops::Sub; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; -use namada_core::types::key::common; -use namada_core::types::storage::{Epoch, KeySeg}; -use namada_core::types::token; -use namada_core::types::token::Amount; +use namada_core::address::Address; +use namada_core::dec::Dec; +use namada_core::key::common; +use namada_core::storage::{Epoch, KeySeg}; +use namada_core::token; +use namada_core::token::Amount; use namada_storage::collections::lazy_map::NestedMap; use namada_storage::collections::{LazyMap, LazySet, LazyVec}; pub use rev_order::ReverseOrdTokenAmount; @@ -469,7 +468,7 @@ impl Display for WeightedValidator { pub struct Position(pub u64); impl KeySeg for Position { - fn parse(string: String) -> namada_core::types::storage::Result + fn parse(string: String) -> namada_core::storage::Result where Self: Sized, { @@ -481,7 +480,7 @@ impl KeySeg for Position { self.0.raw() } - fn to_db_key(&self) -> namada_core::types::storage::DbKeySeg { + fn to_db_key(&self) -> namada_core::storage::DbKeySeg { self.0.to_db_key() } } diff --git a/crates/proof_of_stake/src/types/rev_order.rs b/crates/proof_of_stake/src/types/rev_order.rs index 
57619941d4..ef23fb9e53 100644 --- a/crates/proof_of_stake/src/types/rev_order.rs +++ b/crates/proof_of_stake/src/types/rev_order.rs @@ -1,5 +1,5 @@ -use namada_core::types::storage::KeySeg; -use namada_core::types::token; +use namada_core::storage::KeySeg; +use namada_core::token; /// A wrapper over `token::Amount`, whose `KeySeg` implementation has reverse /// order of the `token::Amount` type. @@ -27,7 +27,7 @@ fn invert(amount: token::Amount) -> token::Amount { } impl KeySeg for ReverseOrdTokenAmount { - fn parse(string: String) -> namada_core::types::storage::Result + fn parse(string: String) -> namada_core::storage::Result where Self: Sized, { @@ -39,7 +39,7 @@ impl KeySeg for ReverseOrdTokenAmount { invert(self.0).raw() } - fn to_db_key(&self) -> namada_core::types::storage::DbKeySeg { + fn to_db_key(&self) -> namada_core::storage::DbKeySeg { invert(self.0).to_db_key() } } diff --git a/crates/proof_of_stake/src/validator_set_update.rs b/crates/proof_of_stake/src/validator_set_update.rs index 474af4ddda..8d15820b88 100644 --- a/crates/proof_of_stake/src/validator_set_update.rs +++ b/crates/proof_of_stake/src/validator_set_update.rs @@ -2,10 +2,10 @@ use std::collections::{HashMap, HashSet}; -use namada_core::types::address::Address; -use namada_core::types::key::PublicKeyTmRawHash; -use namada_core::types::storage::Epoch; -use namada_core::types::token; +use namada_core::address::Address; +use namada_core::key::PublicKeyTmRawHash; +use namada_core::storage::Epoch; +use namada_core::token; use namada_storage::collections::lazy_map::{NestedSubKey, SubKey}; use namada_storage::{StorageRead, StorageWrite}; use num_traits::ops::checked::CheckedAdd; diff --git a/crates/replay_protection/Cargo.toml b/crates/replay_protection/Cargo.toml new file mode 100644 index 0000000000..010020a052 --- /dev/null +++ b/crates/replay_protection/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "namada_replay_protection" +description = "Namada replay protection" +resolver = "2" 
+authors.workspace = true +edition.workspace = true +documentation.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true +version.workspace = true + + +[dependencies] +namada_core = { path = "../core" } diff --git a/crates/core/src/ledger/replay_protection.rs b/crates/replay_protection/src/lib.rs similarity index 87% rename from crates/core/src/ledger/replay_protection.rs rename to crates/replay_protection/src/lib.rs index 5400d1d2d9..b014f39f02 100644 --- a/crates/core/src/ledger/replay_protection.rs +++ b/crates/replay_protection/src/lib.rs @@ -1,7 +1,7 @@ -//! Replay protection storage +//! Replay protection storage keys -use crate::types::hash::Hash; -use crate::types::storage::Key; +use namada_core::hash::Hash; +use namada_core::storage::Key; const ERROR_MSG: &str = "Cannot obtain a valid db key"; diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml index 707dfa97de..0c620e3d92 100644 --- a/crates/sdk/Cargo.toml +++ b/crates/sdk/Cargo.toml @@ -43,6 +43,7 @@ testing = [ "namada_governance/testing", "namada_ibc/testing", "namada_proof_of_stake/testing", + "namada_storage/testing", "namada_tx/testing", "async-client", "proptest", @@ -131,6 +132,7 @@ namada_proof_of_stake = { path = "../proof_of_stake", default-features = false, "testing", ] } namada_state = { path = "../state", features = ["testing"] } +namada_storage = { path = "../storage", features = ["testing"] } namada_test_utils = { path = "../test_utils" } namada_tx = { path = "../tx", features = ["testing"]} namada_vote_ext = {path = "../vote_ext"} diff --git a/crates/sdk/src/args.rs b/crates/sdk/src/args.rs index cd793bf6d8..ce262b065e 100644 --- a/crates/sdk/src/args.rs +++ b/crates/sdk/src/args.rs @@ -4,16 +4,16 @@ use std::collections::HashMap; use std::path::PathBuf; use std::time::Duration as StdDuration; -use namada_core::types::address::Address; -use namada_core::types::chain::ChainId; -use 
namada_core::types::dec::Dec; -use namada_core::types::ethereum_events::EthAddress; -use namada_core::types::keccak::KeccakHash; -use namada_core::types::key::{common, SchemeType}; -use namada_core::types::masp::PaymentAddress; -use namada_core::types::storage::{BlockHeight, Epoch}; -use namada_core::types::time::DateTimeUtc; -use namada_core::types::{storage, token}; +use namada_core::address::Address; +use namada_core::chain::ChainId; +use namada_core::dec::Dec; +use namada_core::ethereum_events::EthAddress; +use namada_core::keccak::KeccakHash; +use namada_core::key::{common, SchemeType}; +use namada_core::masp::PaymentAddress; +use namada_core::storage::{BlockHeight, Epoch}; +use namada_core::time::DateTimeUtc; +use namada_core::{storage, token}; use namada_governance::cli::onchain::{ DefaultProposal, PgfFundingProposal, PgfStewardProposal, }; @@ -95,18 +95,18 @@ pub struct BpConversionTableEntry { impl NamadaTypes for SdkTypes { type AddrOrNativeToken = Address; type Address = Address; - type BalanceOwner = namada_core::types::masp::BalanceOwner; + type BalanceOwner = namada_core::masp::BalanceOwner; type BpConversionTable = HashMap; type ConfigRpcTendermintAddress = tendermint_rpc::Url; type Data = Vec; type EthereumAddress = (); - type Keypair = namada_core::types::key::common::SecretKey; - type PublicKey = namada_core::types::key::common::PublicKey; - type SpendingKey = namada_core::types::masp::ExtendedSpendingKey; + type Keypair = namada_core::key::common::SecretKey; + type PublicKey = namada_core::key::common::PublicKey; + type SpendingKey = namada_core::masp::ExtendedSpendingKey; type TendermintAddress = tendermint_rpc::Url; - type TransferSource = namada_core::types::masp::TransferSource; - type TransferTarget = namada_core::types::masp::TransferTarget; - type ViewingKey = namada_core::types::masp::ExtendedViewingKey; + type TransferSource = namada_core::masp::TransferSource; + type TransferTarget = namada_core::masp::TransferTarget; + type ViewingKey 
= namada_core::masp::ExtendedViewingKey; } /// Common query arguments diff --git a/crates/sdk/src/error.rs b/crates/sdk/src/error.rs index 094a7bfbe0..2db50e18e6 100644 --- a/crates/sdk/src/error.rs +++ b/crates/sdk/src/error.rs @@ -1,10 +1,11 @@ //! Generic Error Type for all of the Shared Crate -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; -use namada_core::types::ethereum_events::EthAddress; -use namada_core::types::storage; -use namada_core::types::storage::Epoch; +use namada_core::address::Address; +use namada_core::dec::Dec; +use namada_core::ethereum_events::EthAddress; +use namada_core::event::EventError; +use namada_core::storage; +use namada_core::storage::Epoch; use namada_tx::Tx; use prost::EncodeError; use tendermint_rpc::Error as RpcError; @@ -60,23 +61,6 @@ pub enum PinnedBalanceError { InvalidViewingKey, } -/// Errors to do with emitting events. -#[derive(Error, Debug, Clone)] -pub enum EventError { - /// Error when parsing an event type - #[error("Invalid event type")] - InvalidEventType, - /// Error when parsing attributes from an event JSON. - #[error("Json missing `attributes` field")] - MissingAttributes, - /// Missing key in attributes. - #[error("Attributes missing key: {0}")] - MissingKey(String), - /// Missing value in attributes. 
- #[error("Attributes missing value: {0}")] - MissingValue(String), -} - /// Errors that deal with querying some kind of data #[derive(Error, Debug, Clone)] pub enum QueryError { diff --git a/crates/sdk/src/eth_bridge/bridge_pool.rs b/crates/sdk/src/eth_bridge/bridge_pool.rs index c8aeb0bb92..81fe057eb0 100644 --- a/crates/sdk/src/eth_bridge/bridge_pool.rs +++ b/crates/sdk/src/eth_bridge/bridge_pool.rs @@ -9,15 +9,15 @@ use borsh_ext::BorshSerializeExt; use ethbridge_bridge_contract::Bridge; use ethers::providers::Middleware; use futures::future::FutureExt; -use namada_core::types::address::{Address, InternalAddress}; -use namada_core::types::eth_abi::Encode; -use namada_core::types::eth_bridge_pool::{ +use namada_core::address::{Address, InternalAddress}; +use namada_core::eth_abi::Encode; +use namada_core::eth_bridge_pool::{ erc20_token_address, GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, }; -use namada_core::types::ethereum_events::EthAddress; -use namada_core::types::keccak::KeccakHash; -use namada_core::types::voting_power::FractionalVotingPower; +use namada_core::ethereum_events::EthAddress; +use namada_core::keccak::KeccakHash; +use namada_core::voting_power::FractionalVotingPower; use namada_ethereum_bridge::storage::bridge_pool::get_pending_key; use namada_token::storage_key::balance_key; use namada_token::Amount; @@ -744,9 +744,10 @@ mod recommendations { use std::collections::BTreeSet; use borsh::BorshDeserialize; - use namada_core::types::ethereum_events::Uint as EthUint; - use namada_core::types::storage::BlockHeight; - use namada_core::types::uint::{self, Uint, I256}; + use namada_core::ethereum_events::Uint as EthUint; + use namada_core::storage::BlockHeight; + use namada_core::uint::{self, Uint, I256}; + use namada_ethereum_bridge::storage::proof::BridgePoolRootProof; use namada_vote_ext::validator_set_update::{ EthAddrBook, VotingPowersMap, VotingPowersMapExt, }; @@ -756,8 +757,6 @@ mod recommendations { use 
crate::eth_bridge::storage::bridge_pool::{ get_nonce_key, get_signed_root_key, }; - use crate::eth_bridge::storage::proof::BridgePoolRootProof; - use crate::io::Io; const fn unsigned_transfer_fee() -> Uint { Uint::from_u64(37_500_u64) @@ -1220,7 +1219,7 @@ mod recommendations { #[cfg(test)] mod test_recommendations { - use namada_core::types::address::Address; + use namada_core::address; use super::*; use crate::io::StdIo; @@ -1243,7 +1242,7 @@ mod recommendations { amount: Default::default(), }, gas_fee: GasFee { - token: namada_core::types::address::nam(), + token: address::testing::nam(), amount: gas_amount.into(), payer: bertha_address(), }, @@ -1286,7 +1285,7 @@ mod recommendations { /// Add ETH to a conversion table. fn add_eth_to_conversion_table(&mut self) { self.conversion_table.insert( - namada_core::types::address::eth(), + address::testing::eth(), args::BpConversionTableEntry { alias: "ETH".into(), conversion_rate: 1e9, // 1 ETH = 1e9 GWEI @@ -1311,7 +1310,7 @@ mod recommendations { amount: Default::default(), }, gas_fee: GasFee { - token: namada_core::types::address::eth(), + token: address::testing::eth(), amount: 1_000_000_000_u64.into(), // 1 GWEI payer: bertha_address(), }, @@ -1347,8 +1346,7 @@ mod recommendations { ctx.expected_eligible.push(EligibleRecommendation { transfer_hash: ctx.pending.keccak256().to_string(), cost: transfer_fee() - - I256::try_from(ctx.pending.gas_fee.amount) - .expect("Test failed"), + - I256::from(ctx.pending.gas_fee.amount), pending_transfer: ctx.pending.clone(), }); }); @@ -1544,14 +1542,14 @@ mod recommendations { let conversion_table = { let mut t = HashMap::new(); t.insert( - namada_core::types::address::apfel(), + address::testing::apfel(), args::BpConversionTableEntry { alias: APFEL.into(), conversion_rate: APF_RATE, }, ); t.insert( - namada_core::types::address::schnitzel(), + address::testing::schnitzel(), args::BpConversionTableEntry { alias: SCHNITZEL.into(), conversion_rate: SCH_RATE, @@ -1566,15 +1564,13 @@ 
mod recommendations { let transfer_paid_in_apfel = { let mut pending = ctx.pending.clone(); pending.transfer.amount = 1.into(); - pending.gas_fee.token = - namada_core::types::address::apfel(); + pending.gas_fee.token = address::testing::apfel(); pending }; let transfer_paid_in_schnitzel = { let mut pending = ctx.pending.clone(); pending.transfer.amount = 2.into(); - pending.gas_fee.token = - namada_core::types::address::schnitzel(); + pending.gas_fee.token = address::testing::schnitzel(); pending }; // add the transfers to the pool, and expect them to @@ -1591,8 +1587,7 @@ mod recommendations { transfer_hash: pending.keccak256().to_string(), cost: transfer_fee() - I256::from((1e9 / rate).floor() as u64) - * I256::try_from(pending.gas_fee.amount) - .expect("Test failed"), + * I256::from(pending.gas_fee.amount), pending_transfer: pending, }); } diff --git a/crates/sdk/src/eth_bridge/mod.rs b/crates/sdk/src/eth_bridge/mod.rs index 9107798938..481c7fb8e5 100644 --- a/crates/sdk/src/eth_bridge/mod.rs +++ b/crates/sdk/src/eth_bridge/mod.rs @@ -8,12 +8,11 @@ use std::ops::ControlFlow; pub use ethers; use ethers::providers::Middleware; use itertools::Either; -pub use namada_core::ledger::eth_bridge::{ADDRESS, INTERNAL_ADDRESS}; -pub use namada_core::types::ethereum_structs as structs; +pub use namada_core::ethereum_structs as structs; pub use namada_ethereum_bridge::storage::eth_bridge_queries::*; pub use namada_ethereum_bridge::storage::parameters::*; pub use namada_ethereum_bridge::storage::wrapped_erc20s; -pub use namada_ethereum_bridge::*; +pub use namada_ethereum_bridge::{ADDRESS, *}; use num256::Uint256; use crate::control_flow::time::{ @@ -74,10 +73,7 @@ where .timeout(deadline, || async { let fut_syncing = client.syncing(); let fut_block_num = client.get_block_number(); - let Ok(status) = futures::try_join!( - fut_syncing, - fut_block_num, - ) else { + let Ok(status) = futures::try_join!(fut_syncing, fut_block_num,) else { return ControlFlow::Continue(()); }; 
ControlFlow::Break(match status { diff --git a/crates/sdk/src/eth_bridge/validator_set.rs b/crates/sdk/src/eth_bridge/validator_set.rs index c11b9b62c6..59248885c4 100644 --- a/crates/sdk/src/eth_bridge/validator_set.rs +++ b/crates/sdk/src/eth_bridge/validator_set.rs @@ -10,10 +10,10 @@ use data_encoding::HEXLOWER; use ethbridge_bridge_contract::Bridge; use ethers::providers::Middleware; use futures::future::{self, FutureExt}; +use namada_core::eth_abi::EncodeCell; +use namada_core::ethereum_events::EthAddress; use namada_core::hints; -use namada_core::types::eth_abi::EncodeCell; -use namada_core::types::ethereum_events::EthAddress; -use namada_core::types::storage::Epoch; +use namada_core::storage::Epoch; use namada_ethereum_bridge::storage::proof::EthereumProof; use namada_vote_ext::validator_set_update::{ ValidatorSetArgs, VotingPowersMap, @@ -24,7 +24,7 @@ use crate::control_flow::install_shutdown_signal; use crate::control_flow::time::{self, Duration, Instant}; use crate::error::{Error as SdkError, EthereumBridgeError, QueryError}; use crate::eth_bridge::ethers::abi::{AbiDecode, AbiType, Tokenizable}; -use crate::eth_bridge::ethers::core::types::TransactionReceipt; +use crate::eth_bridge::ethers::types::TransactionReceipt; use crate::eth_bridge::structs::Signature; use crate::internal_macros::{echo_error, trace_error}; use crate::io::Io; @@ -598,25 +598,30 @@ where // update epoch in the contract args.epoch = Some(new_epoch); - let result = relay_validator_set_update_once::( - &args, - Arc::clone(ð_client), - client, - |transf_result| { - let Some(receipt) = transf_result else { - tracing::warn!("No transfer receipt received from the Ethereum node"); - last_call_succeeded = false; - return; - }; - last_call_succeeded = receipt.is_successful(); - if last_call_succeeded { - tracing::info!(?receipt, "Ethereum transfer succeeded"); - tracing::info!(?new_epoch, "Updated the validator set"); - } else { - tracing::error!(?receipt, "Ethereum transfer failed"); - } - 
}, - ).await; + let result = + relay_validator_set_update_once::( + &args, + Arc::clone(ð_client), + client, + |transf_result| { + let Some(receipt) = transf_result else { + tracing::warn!( + "No transfer receipt received from the Ethereum \ + node" + ); + last_call_succeeded = false; + return; + }; + last_call_succeeded = receipt.is_successful(); + if last_call_succeeded { + tracing::info!(?receipt, "Ethereum transfer succeeded"); + tracing::info!(?new_epoch, "Updated the validator set"); + } else { + tracing::error!(?receipt, "Ethereum transfer failed"); + } + }, + ) + .await; if let Err(err) = result { // only print errors, do not exit diff --git a/crates/sdk/src/events/log.rs b/crates/sdk/src/events/log.rs index 596c23bdc9..3eb19f89e7 100644 --- a/crates/sdk/src/events/log.rs +++ b/crates/sdk/src/events/log.rs @@ -3,9 +3,6 @@ //! The log can only hold `N` events at a time, where `N` is a configurable //! parameter. If the log is holding `N` events, and a new event is logged, //! old events are pruned. 
- -use std::default::Default; - use circular_queue::CircularQueue; use crate::events::Event; @@ -85,7 +82,7 @@ impl EventLog { #[cfg(test)] mod tests { - use namada_core::types::hash::Hash; + use namada_core::hash::Hash; use super::*; use crate::events::{EventLevel, EventType}; diff --git a/crates/sdk/src/events/log/dumb_queries.rs b/crates/sdk/src/events/log/dumb_queries.rs index 8a639b5a1b..1d2b0527a2 100644 --- a/crates/sdk/src/events/log/dumb_queries.rs +++ b/crates/sdk/src/events/log/dumb_queries.rs @@ -8,8 +8,8 @@ use std::collections::HashMap; -use namada_core::types::hash::Hash; -use namada_core::types::storage::BlockHeight; +use namada_core::hash::Hash; +use namada_core::storage::BlockHeight; use crate::events::{Event, EventType}; use crate::ibc::core::client::types::Height as IbcHeight; diff --git a/crates/sdk/src/events/mod.rs b/crates/sdk/src/events/mod.rs index 646fd1b8d3..c160e08dc8 100644 --- a/crates/sdk/src/events/mod.rs +++ b/crates/sdk/src/events/mod.rs @@ -2,226 +2,12 @@ pub mod log; use std::collections::HashMap; -use std::convert::TryFrom; -use std::fmt::{self, Display}; -use std::ops::{Index, IndexMut}; -use std::str::FromStr; -use borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::ethereum_structs::{BpTransferStatus, EthBridgeEvent}; -use namada_core::types::ibc::IbcEvent; -use namada_tx::data::TxType; +pub use namada_core::event::{Event, EventError, EventLevel, EventType}; use serde_json::Value; // use crate::ledger::governance::utils::ProposalEvent; -use crate::error::{EncodingError, Error, EventError}; -use crate::tendermint_proto::v0_37::abci::EventAttribute; - -impl From for Event { - #[inline] - fn from(event: EthBridgeEvent) -> Event { - Self::from(&event) - } -} - -impl From<&EthBridgeEvent> for Event { - fn from(event: &EthBridgeEvent) -> Event { - match event { - EthBridgeEvent::BridgePool { tx_hash, status } => Event { - event_type: EventType::EthereumBridge, - level: EventLevel::Tx, - attributes: { - let mut 
attrs = HashMap::new(); - attrs.insert( - "kind".into(), - match status { - BpTransferStatus::Relayed => "bridge_pool_relayed", - BpTransferStatus::Expired => "bridge_pool_expired", - } - .into(), - ); - attrs.insert("tx_hash".into(), tx_hash.to_string()); - attrs - }, - }, - } - } -} - -/// Indicates if an event is emitted do to -/// an individual Tx or the nature of a finalized block -#[derive(Clone, Debug, Eq, PartialEq, BorshSerialize, BorshDeserialize)] -pub enum EventLevel { - /// Indicates an event is to do with a finalized block. - Block, - /// Indicates an event is to do with an individual transaction. - Tx, -} - -/// Custom events that can be queried from Tendermint -/// using a websocket client -#[derive(Clone, Debug, Eq, PartialEq, BorshSerialize, BorshDeserialize)] -pub struct Event { - /// The type of event. - pub event_type: EventType, - /// The level of the event - whether it relates to a block or an individual - /// transaction. - pub level: EventLevel, - /// Key-value attributes of the event. 
- pub attributes: HashMap, -} - -/// The two types of custom events we currently use -#[derive(Clone, Debug, Eq, PartialEq, BorshSerialize, BorshDeserialize)] -pub enum EventType { - /// The transaction was accepted to be included in a block - Accepted, - /// The transaction was applied during block finalization - Applied, - /// The IBC transaction was applied during block finalization - Ibc(String), - /// The proposal that has been executed - Proposal, - /// The pgf payment - PgfPayment, - /// Ethereum Bridge event - EthereumBridge, -} - -impl Display for EventType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - EventType::Accepted => write!(f, "accepted"), - EventType::Applied => write!(f, "applied"), - EventType::Ibc(t) => write!(f, "{}", t), - EventType::Proposal => write!(f, "proposal"), - EventType::PgfPayment => write!(f, "pgf_payment"), - EventType::EthereumBridge => write!(f, "ethereum_bridge"), - }?; - Ok(()) - } -} - -impl FromStr for EventType { - type Err = EventError; - - fn from_str(s: &str) -> Result { - match s { - "accepted" => Ok(EventType::Accepted), - "applied" => Ok(EventType::Applied), - "proposal" => Ok(EventType::Proposal), - "pgf_payments" => Ok(EventType::PgfPayment), - // IBC - "update_client" => Ok(EventType::Ibc("update_client".to_string())), - "send_packet" => Ok(EventType::Ibc("send_packet".to_string())), - "write_acknowledgement" => { - Ok(EventType::Ibc("write_acknowledgement".to_string())) - } - "ethereum_bridge" => Ok(EventType::EthereumBridge), - _ => Err(EventError::InvalidEventType), - } - } -} - -impl Event { - /// Creates a new event with the hash and height of the transaction - /// already filled in - pub fn new_tx_event(tx: &namada_tx::Tx, height: u64) -> Self { - let mut event = match tx.header().tx_type { - TxType::Wrapper(_) => { - let mut event = Event { - event_type: EventType::Accepted, - level: EventLevel::Tx, - attributes: HashMap::new(), - }; - event["hash"] = 
tx.header_hash().to_string(); - event - } - TxType::Decrypted(_) => { - let mut event = Event { - event_type: EventType::Applied, - level: EventLevel::Tx, - attributes: HashMap::new(), - }; - event["hash"] = tx - .clone() - .update_header(TxType::Raw) - .header_hash() - .to_string(); - event - } - TxType::Protocol(_) => { - let mut event = Event { - event_type: EventType::Applied, - level: EventLevel::Tx, - attributes: HashMap::new(), - }; - event["hash"] = tx.header_hash().to_string(); - event - } - _ => unreachable!(), - }; - event["height"] = height.to_string(); - event["log"] = "".to_string(); - event - } - - /// Check if the events keys contains a given string - pub fn contains_key(&self, key: &str) -> bool { - self.attributes.contains_key(key) - } - - /// Get the value corresponding to a given key, if it exists. - /// Else return None. - pub fn get(&self, key: &str) -> Option<&String> { - self.attributes.get(key) - } -} - -impl Index<&str> for Event { - type Output = String; - - fn index(&self, index: &str) -> &Self::Output { - &self.attributes[index] - } -} - -impl IndexMut<&str> for Event { - fn index_mut(&mut self, index: &str) -> &mut Self::Output { - if !self.attributes.contains_key(index) { - self.attributes.insert(String::from(index), String::new()); - } - self.attributes.get_mut(index).unwrap() - } -} - -impl From for Event { - fn from(ibc_event: IbcEvent) -> Self { - Self { - event_type: EventType::Ibc(ibc_event.event_type), - level: EventLevel::Tx, - attributes: ibc_event.attributes, - } - } -} - -/// Convert our custom event into the necessary tendermint proto type -impl From for crate::tendermint_proto::v0_37::abci::Event { - fn from(event: Event) -> Self { - Self { - r#type: event.event_type.to_string(), - attributes: event - .attributes - .into_iter() - .map(|(key, value)| EventAttribute { - key, - value, - index: true, - }) - .collect(), - } - } -} +use crate::error::{EncodingError, Error}; /// A thin wrapper around a HashMap for parsing event 
JSONs /// returned in tendermint subscription responses. diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index ba8fd35668..cbe453d875 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -1,6 +1,6 @@ extern crate alloc; -pub use namada_core::{borsh, ibc, tendermint, tendermint_proto, types}; +pub use namada_core::*; #[cfg(feature = "tendermint-rpc")] pub use tendermint_rpc; pub use { @@ -36,13 +36,12 @@ use std::path::PathBuf; use std::str::FromStr; use args::{InputAmount, SdkTypes}; +use namada_core::address::Address; +use namada_core::dec::Dec; +use namada_core::ethereum_events::EthAddress; use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; -use namada_core::types::ethereum_events::EthAddress; -use namada_core::types::key::*; -use namada_core::types::masp::{TransferSource, TransferTarget}; -use namada_core::types::token; +use namada_core::key::*; +use namada_core::masp::{TransferSource, TransferTarget}; use namada_tx::data::wrapper::GasLimit; use namada_tx::Tx; use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -229,15 +228,15 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { ) -> args::Redelegate { args::Redelegate { tx: self.tx_builder(), - /// Source validator address + // Source validator address src_validator, - /// Destination validator address + // Destination validator address dest_validator, - /// Owner of the bonds that are being redelegated + // Owner of the bonds that are being redelegated owner: source, - /// The amount of tokens to redelegate + // The amount of tokens to redelegate amount, - /// Path to the TX WASM code file + // Path to the TX WASM code file tx_code_path: PathBuf::from(TX_REDELEGATE_WASM), } } @@ -763,17 +762,16 @@ pub mod testing { use ibc::primitives::proto::Any; use masp_primitives::transaction::TransparentAddress; use namada_account::{InitAccount, UpdateAccount}; - use 
namada_core::types::address::testing::{ + use namada_core::address::testing::{ arb_established_address, arb_non_internal_address, }; - use namada_core::types::address::MASP; - use namada_core::types::eth_bridge_pool::PendingTransfer; - use namada_core::types::hash::testing::arb_hash; - use namada_core::types::storage::testing::arb_epoch; - use namada_core::types::token::testing::{ - arb_denominated_amount, arb_transfer, - }; - use namada_core::types::token::Transfer; + use namada_core::address::MASP; + use namada_core::eth_bridge_pool::PendingTransfer; + use namada_core::hash::testing::arb_hash; + use namada_core::key::testing::arb_common_keypair; + use namada_core::storage::testing::arb_epoch; + use namada_core::token::testing::{arb_denominated_amount, arb_transfer}; + use namada_core::token::Transfer; use namada_governance::storage::proposal::testing::{ arb_init_proposal, arb_vote_proposal, }; @@ -786,29 +784,31 @@ pub mod testing { }; use namada_tx::data::{DecryptedTx, Fee, TxType, WrapperTx}; use proptest::prelude::{Just, Strategy}; - use proptest::{option, prop_compose, prop_oneof}; + use proptest::{arbitrary, collection, option, prop_compose, prop_oneof}; use prost::Message; use ripemd::Digest as RipemdDigest; use sha2::Digest; use super::*; use crate::account::tests::{arb_init_account, arb_update_account}; + use crate::chain::ChainId; + use crate::eth_bridge_pool::testing::arb_pending_transfer; + use crate::key::testing::arb_common_pk; use crate::masp::testing::{ arb_deshielding_transfer, arb_shielded_transfer, arb_shielding_transfer, }; + use crate::time::{DateTime, DateTimeUtc, Utc}; use crate::tx::data::pgf::tests::arb_update_steward_commission; use crate::tx::data::pos::tests::{ arb_become_validator, arb_bond, arb_commission_change, arb_consensus_key_change, arb_metadata_change, arb_redelegation, arb_withdraw, }; - use crate::tx::{Code, Commitment, Header, MaspBuilder, Section}; - use crate::types::chain::ChainId; - use 
crate::types::eth_bridge_pool::testing::arb_pending_transfer; - use crate::types::key::testing::arb_common_pk; - use crate::types::time::{DateTime, DateTimeUtc, Utc}; + use crate::tx::{ + Code, Commitment, Header, MaspBuilder, Section, Signature, + }; - #[derive(Debug)] + #[derive(Debug, Clone)] #[allow(clippy::large_enum_variant)] // To facilitate propagating debugging information pub enum TxData { @@ -834,15 +834,18 @@ pub mod testing { ResignSteward(Address), PendingTransfer(PendingTransfer), IbcAny(Any), - Custom(Box), + Custom, } prop_compose! { // Generate an arbitrary commitment pub fn arb_commitment()( - hash in arb_hash(), + commitment in prop_oneof![ + arb_hash().prop_map(Commitment::Hash), + collection::vec(arbitrary::any::(), 0..=1024).prop_map(Commitment::Id), + ], ) -> Commitment { - Commitment::Hash(hash) + commitment } } @@ -861,6 +864,33 @@ pub mod testing { } } + prop_compose! { + // Generate an arbitrary uttf8 commitment + pub fn arb_utf8_commitment()( + commitment in prop_oneof![ + arb_hash().prop_map(Commitment::Hash), + "[a-zA-Z0-9_]{0,1024}".prop_map(|x| Commitment::Id(x.into_bytes())), + ], + ) -> Commitment { + commitment + } + } + + prop_compose! { + // Generate an arbitrary code section + pub fn arb_utf8_code()( + salt: [u8; 8], + code in arb_utf8_commitment(), + tag in option::of("[a-zA-Z0-9_]{0,1024}"), + ) -> Code { + Code { + salt, + code, + tag, + } + } + } + prop_compose! { // Generate a chain ID pub fn arb_chain_id()(id in "[a-zA-Z0-9_]*") -> ChainId { @@ -986,7 +1016,7 @@ pub mod testing { } // Maximum number of notes to include in a transaction - const MAX_ASSETS: usize = 10; + const MAX_ASSETS: usize = 2; // Type of MASP transaction #[derive(Debug, Clone)] @@ -1148,6 +1178,24 @@ pub mod testing { } } + prop_compose! 
{ + // Generate an arbitrary transaction with maybe a memo + pub fn arb_memoed_tx()( + (mut tx, tx_data) in arb_tx(), + memo in option::of(arb_utf8_code()), + ) -> (Tx, TxData) { + if let Some(memo) = memo { + let sechash = tx + .add_section(Section::ExtraData(memo)) + .get_hash(); + tx.set_memo_sechash(sechash); + } else { + tx.set_memo_sechash(Default::default()); + } + (tx, tx_data) + } + } + prop_compose! { // Generate an arbitrary vote proposal transaction pub fn arb_vote_proposal_tx()( @@ -1434,4 +1482,42 @@ pub mod testing { arb_ibc_any_tx(), ] } + + prop_compose! { + // Generate an arbitrary signature section + pub fn arb_signature(targets: Vec)( + targets in Just(targets), + secret_keys in collection::btree_map( + arbitrary::any::(), + arb_common_keypair(), + 1..3, + ), + signer in option::of(arb_non_internal_address()), + ) -> Signature { + if signer.is_some() { + Signature::new(targets, secret_keys, signer) + } else { + let secret_keys = secret_keys + .into_values() + .enumerate() + .map(|(k, v)| (k as u8, v)) + .collect(); + Signature::new(targets, secret_keys, signer) + } + } + } + + prop_compose! 
{ + // Generate an arbitrary signed tx + pub fn arb_signed_tx()(tx in arb_memoed_tx())( + sigs in collection::vec(arb_signature(tx.0.sechashes()), 0..3), + mut tx in Just(tx), + ) -> (Tx, TxData) { + for sig in sigs { + // Add all the generated signature sections + tx.0.add_section(Section::Signature(sig)); + } + (tx.0, tx.1) + } + } } diff --git a/crates/sdk/src/masp.rs b/crates/sdk/src/masp.rs index 1f6d284705..54442510e5 100644 --- a/crates/sdk/src/masp.rs +++ b/crates/sdk/src/masp.rs @@ -8,8 +8,6 @@ use std::ops::Deref; use std::path::PathBuf; use std::str::FromStr; -// use async_std::io::prelude::WriteExt; -// use async_std::io::{self}; use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; use itertools::Either; @@ -50,16 +48,17 @@ use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; use masp_proofs::bellman::groth16::PreparedVerifyingKey; use masp_proofs::bls12_381::Bls12; use masp_proofs::prover::LocalTxProver; +#[cfg(not(feature = "testing"))] use masp_proofs::sapling::SaplingVerificationContext; -use namada_core::types::address::{Address, MASP}; -use namada_core::types::dec::Dec; -use namada_core::types::masp::{ +use namada_core::address::{Address, MASP}; +use namada_core::dec::Dec; +use namada_core::masp::{ encode_asset_type, AssetData, BalanceOwner, ExtendedViewingKey, PaymentAddress, TransferSource, TransferTarget, }; -use namada_core::types::storage::{BlockHeight, Epoch, IndexedTx, TxIndex}; -use namada_core::types::time::{DateTimeUtc, DurationSecs}; -use namada_core::types::uint::Uint; +use namada_core::storage::{BlockHeight, Epoch, IndexedTx, TxIndex}; +use namada_core::time::{DateTimeUtc, DurationSecs}; +use namada_core::uint::Uint; use namada_ibc::IbcMessage; use namada_token::{self as token, Denomination, MaspDigitPos, Transfer}; use namada_tx::data::{TxResult, WrapperTx}; @@ -71,8 +70,6 @@ use thiserror::Error; use token::storage_key::{balance_key, is_any_shielded_action_balance_key}; use 
token::Amount; -#[cfg(feature = "testing")] -use crate::error::EncodingError; use crate::error::{Error, PinnedBalanceError, QueryError}; use crate::io::Io; use crate::queries::Client; @@ -88,17 +85,10 @@ use crate::{display_line, edisplay_line, rpc, MaybeSend, MaybeSync, Namada}; /// the default OS specific path is used. pub const ENV_VAR_MASP_PARAMS_DIR: &str = "NAMADA_MASP_PARAMS_DIR"; -/// Env var to either "save" proofs into files or to "load" them from -/// files. -pub const ENV_VAR_MASP_TEST_PROOFS: &str = "NAMADA_MASP_TEST_PROOFS"; - /// Randomness seed for MASP integration tests to build proofs with /// deterministic rng. pub const ENV_VAR_MASP_TEST_SEED: &str = "NAMADA_MASP_TEST_SEED"; -/// A directory to save serialized proofs for tests. -pub const MASP_TEST_PROOFS_DIR: &str = "test_fixtures/masp_proofs"; - /// The network to use for MASP #[cfg(feature = "mainnet")] const NETWORK: MainNetwork = MainNetwork; @@ -116,21 +106,13 @@ pub const CONVERT_NAME: &str = "masp-convert.params"; /// Type alias for convenience and profit pub type IndexedNoteData = BTreeMap< IndexedTx, - ( - Epoch, - BTreeSet, - Transaction, - ), + (Epoch, BTreeSet, Transaction), >; /// Type alias for the entries of [`IndexedNoteData`] iterators pub type IndexedNoteEntry = ( IndexedTx, - ( - Epoch, - BTreeSet, - Transaction, - ), + (Epoch, BTreeSet, Transaction), ); /// Shielded transfer @@ -157,14 +139,6 @@ pub struct MaspTokenRewardData { pub locked_amount_target: Uint, } -#[cfg(feature = "testing")] -#[derive(Clone, Copy, Debug)] -enum LoadOrSaveProofs { - Load, - Save, - Neither, -} - /// A return type for gen_shielded_transfer #[derive(Error, Debug)] pub enum TransferErr { @@ -237,7 +211,9 @@ fn load_pvks() -> &'static PVKs { pub fn check_spend( spend: &SpendDescription<::SaplingAuth>, sighash: &[u8; 32], - ctx: &mut SaplingVerificationContext, + #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, + #[cfg(feature = "testing")] + ctx: &mut 
testing::MockSaplingVerificationContext, parameters: &PreparedVerifyingKey, ) -> bool { let zkproof = @@ -246,6 +222,7 @@ pub fn check_spend( Ok(zkproof) => zkproof, _ => return false, }; + ctx.check_spend( spend.cv, spend.anchor, @@ -261,7 +238,9 @@ pub fn check_spend( /// check_output wrapper pub fn check_output( output: &OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>, - ctx: &mut SaplingVerificationContext, + #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, + #[cfg(feature = "testing")] + ctx: &mut testing::MockSaplingVerificationContext, parameters: &PreparedVerifyingKey, ) -> bool { let zkproof = @@ -276,13 +255,16 @@ pub fn check_output( Some(p) => p, None => return false, }; + ctx.check_output(output.cv, output.cmu, epk, zkproof, parameters) } /// check convert wrapper pub fn check_convert( convert: &ConvertDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>, - ctx: &mut SaplingVerificationContext, + #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, + #[cfg(feature = "testing")] + ctx: &mut testing::MockSaplingVerificationContext, parameters: &PreparedVerifyingKey, ) -> bool { let zkproof = @@ -291,6 +273,7 @@ pub fn check_convert( Ok(zkproof) => zkproof, _ => return false, }; + ctx.check_convert(convert.cv, convert.anchor, zkproof, parameters) } @@ -368,7 +351,10 @@ pub fn verify_shielded_tx(transaction: &Transaction) -> bool { output_vk, } = load_pvks(); + #[cfg(not(feature = "testing"))] let mut ctx = SaplingVerificationContext::new(true); + #[cfg(feature = "testing")] + let mut ctx = testing::MockSaplingVerificationContext::new(true); let spends_valid = sapling_bundle .shielded_spends .iter() @@ -552,7 +538,7 @@ impl Unscanned { where I: IntoIterator, { - self.txs.extend(items.into_iter()); + self.txs.extend(items); } fn contains_height(&self, height: u64) -> bool { @@ -884,8 +870,7 @@ impl 
ShieldedContext { tx: &Tx, action_arg: ExtractShieldedActionArg<'args, C>, check_header: bool, - ) -> Result<(BTreeSet, Transaction), Error> - { + ) -> Result<(BTreeSet, Transaction), Error> { let maybe_transaction = if check_header { let tx_header = tx.header(); // NOTE: simply looking for masp sections attached to the tx @@ -1036,7 +1021,7 @@ impl ShieldedContext { &mut self, indexed_tx: IndexedTx, epoch: Epoch, - tx_changed_keys: &BTreeSet, + tx_changed_keys: &BTreeSet, shielded: &Transaction, vk: &ViewingKey, native_token: Address, @@ -1325,7 +1310,7 @@ impl ShieldedContext { let Some(denom) = query_denom(client, token).await else { return Err(Error::Query(QueryError::General(format!( "denomination for token {token}" - )))) + )))); }; for position in MaspDigitPos::iter() { let asset_type = @@ -1392,7 +1377,10 @@ impl ShieldedContext { { // Query for the ID of the last accepted transaction let Some((token, denom, position, ep, conv, path)) = - query_conversion(client, asset_type).await else { return }; + query_conversion(client, asset_type).await + else { + return; + }; self.asset_types.insert( asset_type, AssetData { @@ -1985,6 +1973,8 @@ impl ShieldedContext { let memo = MemoBytes::empty(); // Try to get a seed from env var, if any. 
+ let rng = StdRng::from_rng(OsRng).unwrap(); + #[cfg(feature = "testing")] let rng = if let Ok(seed) = env::var(ENV_VAR_MASP_TEST_SEED) .map_err(|e| Error::Other(e.to_string())) .and_then(|seed| { @@ -2000,7 +1990,7 @@ impl ShieldedContext { ); StdRng::seed_from_u64(seed) } else { - StdRng::from_rng(OsRng).unwrap() + rng }; // Now we build up the transaction within this object @@ -2051,9 +2041,9 @@ impl ShieldedContext { // Convert transaction amount into MASP types let Some(denom) = query_denom(context.client(), token).await else { - return Err(TransferErr::General(Error::from(QueryError::General(format!( - "denomination for token {token}" - ))))) + return Err(TransferErr::General(Error::from( + QueryError::General(format!("denomination for token {token}")), + ))); }; let (asset_types, masp_amount) = { let mut shielded = context.shielded_mut().await; @@ -2292,154 +2282,29 @@ impl ShieldedContext { } } - // To speed up integration tests, we can save and load proofs - #[cfg(feature = "testing")] - let load_or_save = if let Ok(masp_proofs) = - env::var(ENV_VAR_MASP_TEST_PROOFS) - { - let parsed = match masp_proofs.to_ascii_lowercase().as_str() { - "load" => LoadOrSaveProofs::Load, - "save" => LoadOrSaveProofs::Save, - env_var => Err(Error::Other(format!( - "Unexpected value for {ENV_VAR_MASP_TEST_PROOFS} env var. \ - Expecting \"save\" or \"load\", but got \"{env_var}\"." - )))?, - }; - if env::var(ENV_VAR_MASP_TEST_SEED).is_err() { - Err(Error::Other(format!( - "Ensure to set a seed with {ENV_VAR_MASP_TEST_SEED} env \ - var when using {ENV_VAR_MASP_TEST_PROOFS} for \ - deterministic proofs." 
- )))?; - } - parsed - } else { - LoadOrSaveProofs::Neither - }; - let builder_clone = builder.clone().map_builder(WalletMap); - #[cfg(feature = "testing")] - let builder_bytes = borsh::to_vec(&builder_clone).map_err(|e| { - Error::from(EncodingError::Conversion(e.to_string())) - })?; - - let build_transfer = |prover: LocalTxProver| -> Result< - ShieldedTransfer, - builder::Error, - > { - let (masp_tx, metadata) = builder - .build(&prover, &FeeRule::non_standard(U64Sum::zero()))?; - Ok(ShieldedTransfer { - builder: builder_clone, - masp_tx, - metadata, - epoch, - }) - }; - - #[cfg(feature = "testing")] - { - let builder_hash = - namada_core::types::hash::Hash::sha256(&builder_bytes); - - let saved_filepath = env::current_dir() - .map_err(|e| Error::Other(e.to_string()))? - // Two up from "tests" dir to the root dir - .parent() - .and_then(std::path::Path::parent) - .ok_or_else(|| { - Error::Other("Can not get root dir".to_string()) - })? - .join(MASP_TEST_PROOFS_DIR) - .join(format!("{builder_hash}.bin")); - - if let LoadOrSaveProofs::Load = load_or_save { - let recommendation = format!( - "Re-run the tests with {ENV_VAR_MASP_TEST_PROOFS}=save to \ - re-generate proofs." - ); - let exp_str = format!( - "Read saved MASP proofs from {}. {recommendation}", - saved_filepath.to_string_lossy() - ); - let loaded_bytes = tokio::fs::read(&saved_filepath) - .await - .map_err(|_e| Error::Other(exp_str))?; - - let exp_str = format!( - "Valid `ShieldedTransfer` bytes in {}. 
{recommendation}", - saved_filepath.to_string_lossy() - ); - let loaded: ShieldedTransfer = - BorshDeserialize::try_from_slice(&loaded_bytes) - .map_err(|_e| Error::Other(exp_str))?; - - // Cache the generated transfer - let mut shielded_ctx = context.shielded_mut().await; - shielded_ctx - .pre_cache_transaction( - context, - &loaded.masp_tx, - source, - target, - token, - epoch, - ) - .await?; - - Ok(Some(loaded)) - } else { - // Build and return the constructed transaction - let built = build_transfer( - context.shielded().await.utils.local_tx_prover(), - )?; - if let LoadOrSaveProofs::Save = load_or_save { - let built_bytes = borsh::to_vec(&built).map_err(|e| { - Error::from(EncodingError::Conversion(e.to_string())) - })?; - tokio::fs::write(&saved_filepath, built_bytes) - .await - .map_err(|e| Error::Other(e.to_string()))?; - } - - // Cache the generated transfer - let mut shielded_ctx = context.shielded_mut().await; - shielded_ctx - .pre_cache_transaction( - context, - &built.masp_tx, - source, - target, - token, - epoch, - ) - .await?; - - Ok(Some(built)) - } - } - + // Build and return the constructed transaction #[cfg(not(feature = "testing"))] - { - // Build and return the constructed transaction - let built = build_transfer( - context.shielded().await.utils.local_tx_prover(), - )?; - - let mut shielded_ctx = context.shielded_mut().await; - shielded_ctx - .pre_cache_transaction( - context, - &built.masp_tx, - source, - target, - token, - epoch, - ) - .await?; + let prover = context.shielded().await.utils.local_tx_prover(); + #[cfg(feature = "testing")] + let prover = testing::MockTxProver(std::sync::Mutex::new(OsRng)); + let (masp_tx, metadata) = + builder.build(&prover, &FeeRule::non_standard(U64Sum::zero()))?; + + // Cache the generated transfer + let mut shielded_ctx = context.shielded_mut().await; + shielded_ctx + .pre_cache_transaction( + context, &masp_tx, source, target, token, epoch, + ) + .await?; - Ok(Some(built)) - } + Ok(Some(ShieldedTransfer { 
+ builder: builder_clone, + masp_tx, + metadata, + epoch, + })) } // Updates the internal state with the data of the newly generated @@ -2756,7 +2621,7 @@ enum ExtractShieldedActionArg<'args, C: Client + Sync> { async fn extract_payload_from_shielded_action<'args, C: Client + Sync>( tx_data: &[u8], args: ExtractShieldedActionArg<'args, C>, -) -> Result<(BTreeSet, Transaction), Error> { +) -> Result<(BTreeSet, Transaction), Error> { let message = namada_ibc::decode_message(tx_data) .map_err(|e| Error::Other(e.to_string()))?; @@ -2833,7 +2698,7 @@ async fn extract_payload_from_shielded_action<'args, C: Client + Sync>( TxResult::from_str(&attribute.value).unwrap(); for ibc_event in &tx_result.ibc_events { let event = - namada_core::types::ibc::get_shielded_transfer( + namada_core::ibc::get_shielded_transfer( ibc_event, ) .ok() @@ -2985,58 +2850,146 @@ pub mod testing { use std::ops::AddAssign; use std::sync::Mutex; - use masp_primitives::asset_type::AssetType; + use bls12_381::{G1Affine, G2Affine}; use masp_primitives::consensus::testing::arb_height; use masp_primitives::constants::SPENDING_KEY_GENERATOR; - use masp_primitives::convert::AllowedConversion; use masp_primitives::ff::Field; - use masp_primitives::merkle_tree::MerklePath; use masp_primitives::sapling::prover::TxProver; - use masp_primitives::sapling::redjubjub::{PublicKey, Signature}; - use masp_primitives::sapling::{ - Diversifier, Node, PaymentAddress, ProofGenerationKey, Rseed, - }; - use masp_primitives::transaction::components::{I128Sum, GROTH_PROOF_SIZE}; - use proptest::collection::SizeRange; + use masp_primitives::sapling::redjubjub::Signature; + use masp_primitives::sapling::{ProofGenerationKey, Rseed}; + use masp_primitives::transaction::components::GROTH_PROOF_SIZE; + use masp_proofs::bellman::groth16::Proof; use proptest::prelude::*; + use proptest::sample::SizeRange; use proptest::test_runner::TestRng; use proptest::{collection, option, prop_compose}; use super::*; + use 
crate::address::testing::arb_address; use crate::masp_primitives::consensus::BranchId; use crate::masp_primitives::constants::VALUE_COMMITMENT_RANDOMNESS_GENERATOR; use crate::masp_primitives::merkle_tree::FrozenCommitmentTree; use crate::masp_primitives::sapling::keys::OutgoingViewingKey; use crate::masp_primitives::sapling::redjubjub::PrivateKey; use crate::masp_primitives::transaction::components::transparent::testing::arb_transparent_address; + use crate::masp_proofs::sapling::SaplingVerificationContextInner; + use crate::storage::testing::arb_epoch; use crate::token::testing::arb_denomination; - use crate::types::address::testing::arb_address; - use crate::types::storage::testing::arb_epoch; - - #[derive(Debug, Clone)] - // Adapts a CSPRNG from a PRNG for proptesting - pub struct TestCsprng(R); - impl CryptoRng for TestCsprng {} + /// A context object for verifying the Sapling components of a single Zcash + /// transaction. Same as SaplingVerificationContext, but always assumes the + /// proofs to be valid. + pub struct MockSaplingVerificationContext { + inner: SaplingVerificationContextInner, + zip216_enabled: bool, + } - impl RngCore for TestCsprng { - fn next_u32(&mut self) -> u32 { - self.0.next_u32() + impl MockSaplingVerificationContext { + /// Construct a new context to be used with a single transaction. + pub fn new(zip216_enabled: bool) -> Self { + MockSaplingVerificationContext { + inner: SaplingVerificationContextInner::new(), + zip216_enabled, + } } - fn next_u64(&mut self) -> u64 { - self.0.next_u64() + /// Perform consensus checks on a Sapling SpendDescription, while + /// accumulating its value commitment inside the context for later use. 
+ #[allow(clippy::too_many_arguments)] + pub fn check_spend( + &mut self, + cv: jubjub::ExtendedPoint, + anchor: bls12_381::Scalar, + nullifier: &[u8; 32], + rk: PublicKey, + sighash_value: &[u8; 32], + spend_auth_sig: Signature, + zkproof: Proof, + _verifying_key: &PreparedVerifyingKey, + ) -> bool { + let zip216_enabled = true; + self.inner.check_spend( + cv, + anchor, + nullifier, + rk, + sighash_value, + spend_auth_sig, + zkproof, + &mut (), + |_, rk, msg, spend_auth_sig| { + rk.verify_with_zip216( + &msg, + &spend_auth_sig, + SPENDING_KEY_GENERATOR, + zip216_enabled, + ) + }, + |_, _proof, _public_inputs| true, + ) } - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest) + /// Perform consensus checks on a Sapling SpendDescription, while + /// accumulating its value commitment inside the context for later use. + #[allow(clippy::too_many_arguments)] + pub fn check_convert( + &mut self, + cv: jubjub::ExtendedPoint, + anchor: bls12_381::Scalar, + zkproof: Proof, + _verifying_key: &PreparedVerifyingKey, + ) -> bool { + self.inner.check_convert( + cv, + anchor, + zkproof, + &mut (), + |_, _proof, _public_inputs| true, + ) } - fn try_fill_bytes( + /// Perform consensus checks on a Sapling OutputDescription, while + /// accumulating its value commitment inside the context for later use. + pub fn check_output( &mut self, - dest: &mut [u8], - ) -> Result<(), rand::Error> { - self.0.try_fill_bytes(dest) + cv: jubjub::ExtendedPoint, + cmu: bls12_381::Scalar, + epk: jubjub::ExtendedPoint, + zkproof: Proof, + _verifying_key: &PreparedVerifyingKey, + ) -> bool { + self.inner.check_output( + cv, + cmu, + epk, + zkproof, + |_proof, _public_inputs| true, + ) + } + + /// Perform consensus checks on the valueBalance and bindingSig parts of + /// a Sapling transaction. All SpendDescriptions and + /// OutputDescriptions must have been checked before calling + /// this function. 
+ pub fn final_check( + &self, + value_balance: I128Sum, + sighash_value: &[u8; 32], + binding_sig: Signature, + ) -> bool { + self.inner.final_check( + value_balance, + sighash_value, + binding_sig, + |bvk, msg, binding_sig| { + bvk.verify_with_zip216( + &msg, + &binding_sig, + VALUE_COMMITMENT_RANDOMNESS_GENERATOR, + self.zip216_enabled, + ) + }, + ) } } @@ -3083,7 +3036,7 @@ pub mod testing { // An implementation of TxProver that does everything except generating // valid zero-knowledge proofs. Uses the supplied source of randomness to // carry out its operations. - pub struct MockTxProver(Mutex); + pub struct MockTxProver(pub Mutex); impl TxProver for MockTxProver { type SaplingProvingContext = SaplingProvingContext; @@ -3140,14 +3093,23 @@ pub mod testing { // Accumulate the value commitment in the context ctx.cv_sum += value_commitment; - Ok(([0u8; GROTH_PROOF_SIZE], value_commitment, rk)) + let mut zkproof = [0u8; GROTH_PROOF_SIZE]; + let proof = Proof:: { + a: G1Affine::generator(), + b: G2Affine::generator(), + c: G1Affine::generator(), + }; + proof + .write(&mut zkproof[..]) + .expect("should be able to serialize a proof"); + Ok((zkproof, value_commitment, rk)) } fn output_proof( &self, ctx: &mut Self::SaplingProvingContext, _esk: jubjub::Fr, - _payment_address: PaymentAddress, + _payment_address: masp_primitives::sapling::PaymentAddress, _rcm: jubjub::Fr, asset_type: AssetType, value: u64, @@ -3180,7 +3142,17 @@ pub mod testing { // check internal consistency. ctx.cv_sum -= value_commitment_point; // Outputs subtract from the total. 
- ([0u8; GROTH_PROOF_SIZE], value_commitment_point) + let mut zkproof = [0u8; GROTH_PROOF_SIZE]; + let proof = Proof:: { + a: G1Affine::generator(), + b: G2Affine::generator(), + c: G1Affine::generator(), + }; + proof + .write(&mut zkproof[..]) + .expect("should be able to serialize a proof"); + + (zkproof, value_commitment_point) } fn convert_proof( @@ -3218,7 +3190,17 @@ pub mod testing { // Accumulate the value commitment in the context ctx.cv_sum += value_commitment; - Ok(([0u8; GROTH_PROOF_SIZE], value_commitment)) + let mut zkproof = [0u8; GROTH_PROOF_SIZE]; + let proof = Proof:: { + a: G1Affine::generator(), + b: G2Affine::generator(), + c: G1Affine::generator(), + }; + proof + .write(&mut zkproof[..]) + .expect("should be able to serialize a proof"); + + Ok((zkproof, value_commitment)) } fn binding_sig( @@ -3276,6 +3258,33 @@ pub mod testing { } } + #[derive(Debug, Clone)] + // Adapts a CSPRNG from a PRNG for proptesting + pub struct TestCsprng(R); + + impl CryptoRng for TestCsprng {} + + impl RngCore for TestCsprng { + fn next_u32(&mut self) -> u32 { + self.0.next_u32() + } + + fn next_u64(&mut self) -> u64 { + self.0.next_u64() + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + self.0.fill_bytes(dest) + } + + fn try_fill_bytes( + &mut self, + dest: &mut [u8], + ) -> Result<(), rand::Error> { + self.0.try_fill_bytes(dest) + } + } + prop_compose! { // Expose a random number generator pub fn arb_rng()(rng in Just(()).prop_perturb(|(), rng| rng)) -> TestRng { @@ -3373,7 +3382,7 @@ pub mod testing { // Maximum value for a note partition const MAX_MONEY: u64 = 100; // Maximum number of partitions for a note - const MAX_SPLITS: usize = 10; + const MAX_SPLITS: usize = 3; prop_compose! { // Arbitrarily partition the given vector of integers into sets and sum diff --git a/crates/sdk/src/queries/mod.rs b/crates/sdk/src/queries/mod.rs index bf30131170..8d612a68eb 100644 --- a/crates/sdk/src/queries/mod.rs +++ b/crates/sdk/src/queries/mod.rs @@ -2,7 +2,7 @@ //! 
defined via `router!` macro. // Re-export to show in rustdoc! -use namada_core::types::storage::BlockHeight; +use namada_core::storage::BlockHeight; use namada_state::{DBIter, StorageHasher, DB}; pub use shell::Shell; use shell::SHELL; @@ -60,7 +60,7 @@ where { if request.height.value() != 0 && request.height.value() - != ctx.wl_storage.storage.get_last_block_height().0 + != ctx.state.in_mem().get_last_block_height().0 { return Err(namada_storage::Error::new_const( "This query doesn't support arbitrary block heights, only the \ @@ -96,9 +96,8 @@ pub fn require_no_data(request: &RequestQuery) -> namada_storage::Result<()> { /// Queries testing helpers #[cfg(any(test, feature = "testing"))] mod testing { - - use namada_core::types::storage::BlockHeight; - use namada_state::testing::TestWlStorage; + use borsh_ext::BorshSerializeExt; + use namada_state::testing::TestState; use tendermint_rpc::Response; use super::*; @@ -112,8 +111,8 @@ mod testing { { /// RPC router pub rpc: RPC, - /// storage - pub wl_storage: TestWlStorage, + /// state + pub state: TestState, /// event log pub event_log: EventLog, } @@ -126,24 +125,20 @@ mod testing { /// Initialize a test client for the given root RPC router pub fn new(rpc: RPC) -> Self { // Initialize the `TestClient` - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); // Initialize mock gas limit let max_block_gas_key = namada_parameters::storage::get_max_block_gas_key(); - wl_storage - .storage - .write( - &max_block_gas_key, - namada_core::types::encode(&20_000_000_u64), - ) + state + .db_write(&max_block_gas_key, 20_000_000_u64.serialize_to_vec()) .expect( "Max block gas parameter must be initialized in storage", ); let event_log = EventLog::default(); Self { rpc, - wl_storage, + state, event_log, } } @@ -179,7 +174,7 @@ mod testing { prove, }; let ctx = RequestCtx { - wl_storage: &self.wl_storage, + state: self.state.read_only(), event_log: &self.event_log, vp_wasm_cache: (), tx_wasm_cache: 
(), diff --git a/crates/sdk/src/queries/router.rs b/crates/sdk/src/queries/router.rs index 25a55ecf7f..5befe67f3b 100644 --- a/crates/sdk/src/queries/router.rs +++ b/crates/sdk/src/queries/router.rs @@ -126,6 +126,10 @@ macro_rules! try_match_segments { ); } )* + + return Err( + $crate::queries::router::Error::WrongPath($request.path.clone())) + .into_storage_result(); }; // Terminal tail call, invoked after when all the args in the current @@ -401,7 +405,7 @@ macro_rules! pattern_and_handler_to_method { `storage_value` and `storage_prefix`) from `storage_value`."] pub async fn storage_value(&self, client: &CLIENT, data: Option>, - height: Option, + height: Option, prove: bool, $( $param: &$param_ty ),* ) @@ -453,7 +457,7 @@ macro_rules! pattern_and_handler_to_method { `storage_value` and `storage_prefix`) from `" $handle "`."] pub async fn $handle(&self, client: &CLIENT, data: Option>, - height: Option, + height: Option, prove: bool, $( $param: &$param_ty ),* ) @@ -844,8 +848,8 @@ macro_rules! 
router { #[cfg(test)] mod test_rpc_handlers { use borsh_ext::BorshSerializeExt; - use namada_core::types::storage::Epoch; - use namada_core::types::token; + use namada_core::storage::Epoch; + use namada_core::token; use namada_state::{DBIter, StorageHasher, DB}; use crate::queries::{ @@ -971,8 +975,8 @@ mod test_rpc_handlers { /// ``` #[cfg(test)] mod test_rpc { - use namada_core::types::storage::Epoch; - use namada_core::types::token; + use namada_core::storage::Epoch; + use namada_core::token; use super::test_rpc_handlers::*; @@ -1009,10 +1013,10 @@ mod test_rpc { #[cfg(test)] mod test { + use namada_core::storage::Epoch; use namada_core::tendermint::block; - use namada_core::types::storage::Epoch; - use namada_core::types::token; - use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; + use namada_core::token; + use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; use super::test_rpc::TEST_RPC; use crate::queries::testing::TestClient; @@ -1032,7 +1036,26 @@ mod test { }; let ctx = RequestCtx { event_log: &client.event_log, - wl_storage: &client.wl_storage, + state: &client.state, + vp_wasm_cache: (), + tx_wasm_cache: (), + storage_read_past_height_limit: None, + }; + let result = TEST_RPC.handle(ctx, &request); + assert!(result.is_err()); + + // Test request with another invalid path. + // The key difference here is that we are testing + // an invalid path in a nested segment. 
+ let request = RequestQuery { + path: "/b/4".to_owned(), + data: Default::default(), + height: block::Height::from(0_u32), + prove: Default::default(), + }; + let ctx = RequestCtx { + event_log: &client.event_log, + state: &client.state, vp_wasm_cache: (), tx_wasm_cache: (), storage_read_past_height_limit: None, @@ -1049,7 +1072,7 @@ mod test { }; let ctx = RequestCtx { event_log: &client.event_log, - wl_storage: &client.wl_storage, + state: &client.state, vp_wasm_cache: (), tx_wasm_cache: (), storage_read_past_height_limit: None, diff --git a/crates/sdk/src/queries/shell.rs b/crates/sdk/src/queries/shell.rs index ea885ee4a6..d4bcca3409 100644 --- a/crates/sdk/src/queries/shell.rs +++ b/crates/sdk/src/queries/shell.rs @@ -8,17 +8,17 @@ use masp_primitives::asset_type::AssetType; use masp_primitives::merkle_tree::MerklePath; use masp_primitives::sapling::Node; use namada_account::{Account, AccountPublicKeysMap}; +use namada_core::address::Address; +use namada_core::dec::Dec; +use namada_core::hash::Hash; use namada_core::hints; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; -use namada_core::types::hash::Hash; -use namada_core::types::storage::{ +use namada_core::storage::{ self, BlockHeight, BlockResults, Epoch, KeySeg, PrefixValue, }; -use namada_core::types::token::{Denomination, MaspDigitPos}; -use namada_core::types::uint::Uint; -use namada_state::{DBIter, LastBlock, StorageHasher, DB}; -use namada_storage::{self, ResultExt, StorageRead}; +use namada_core::token::{Denomination, MaspDigitPos}; +use namada_core::uint::Uint; +use namada_state::{DBIter, LastBlock, StateRead, StorageHasher, DB}; +use namada_storage::{ResultExt, StorageRead}; #[cfg(any(test, feature = "async-client"))] use namada_tx::data::TxResult; @@ -138,10 +138,10 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let (iter, _gas) = ctx.wl_storage.storage.iter_results(); + let (iter, _gas) = 
ctx.state.db_iter_results(); let mut results = vec![ BlockResults::default(); - ctx.wl_storage.storage.block.height.0 as usize + 1 + ctx.state.in_mem().block.height.0 as usize + 1 ]; for (key, value, _gas) in iter { let key = u64::parse(key.clone()).map_err(|_| { @@ -176,8 +176,8 @@ where H: 'static + StorageHasher + Sync, { Ok(ctx - .wl_storage - .storage + .state + .in_mem() .conversion_state .assets .iter() @@ -202,12 +202,8 @@ where H: 'static + StorageHasher + Sync, { // Conversion values are constructed on request - if let Some(((addr, denom, digit), epoch, conv, pos)) = ctx - .wl_storage - .storage - .conversion_state - .assets - .get(&asset_type) + if let Some(((addr, denom, digit), epoch, conv, pos)) = + ctx.state.in_mem().conversion_state.assets.get(&asset_type) { Ok(Some(( addr.clone(), @@ -217,7 +213,7 @@ where Into::::into( conv.clone(), ), - ctx.wl_storage.storage.conversion_state.tree.path(*pos), + ctx.state.in_mem().conversion_state.tree.path(*pos), ))) } else { Ok(None) @@ -232,11 +228,11 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let tokens = ctx.wl_storage.storage.conversion_state.tokens.clone(); + let tokens = ctx.state.in_mem().conversion_state.tokens.clone(); let mut data = Vec::::new(); for (name, token) in tokens { let max_reward_rate = ctx - .wl_storage + .state .read::(&namada_token::storage_key::masp_max_reward_rate_key( &token, ))? @@ -250,7 +246,7 @@ where )) })?; let kd_gain = ctx - .wl_storage + .state .read::(&namada_token::storage_key::masp_kd_gain_key(&token))? .ok_or_else(|| { namada_storage::Error::new(std::io::Error::new( @@ -262,7 +258,7 @@ where )) })?; let kp_gain = ctx - .wl_storage + .state .read::(&namada_token::storage_key::masp_kp_gain_key(&token))? 
.ok_or_else(|| { namada_storage::Error::new(std::io::Error::new( @@ -274,7 +270,7 @@ where )) })?; let locked_amount_target = ctx - .wl_storage + .state .read::( &namada_token::storage_key::masp_locked_amount_target_key( &token, @@ -310,7 +306,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let data = ctx.wl_storage.storage.last_epoch; + let data = ctx.state.in_mem().last_epoch; Ok(data) } @@ -321,7 +317,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let data = ctx.wl_storage.storage.native_token.clone(); + let data = ctx.state.in_mem().native_token.clone(); Ok(data) } @@ -333,7 +329,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - Ok(ctx.wl_storage.storage.block.pred_epochs.get_epoch(height)) + Ok(ctx.state.in_mem().block.pred_epochs.get_epoch(height)) } fn last_block( @@ -343,7 +339,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - Ok(ctx.wl_storage.storage.last_block.clone()) + Ok(ctx.state.in_mem().last_block.clone()) } fn first_block_height_of_current_epoch( @@ -353,8 +349,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - ctx.wl_storage - .storage + ctx.state + .in_mem() .block .pred_epochs .first_block_heights @@ -379,7 +375,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let last_committed_height = ctx.wl_storage.storage.get_last_block_height(); + let last_committed_height = ctx.state.in_mem().get_last_block_height(); let queried_height = { let height: BlockHeight = request.height.into(); let is_last_height_query = height.0 == 0; @@ -405,16 +401,14 @@ where } match ctx - .wl_storage - .storage - .read_with_height(&storage_key, queried_height) + .state + .db_read_with_height(&storage_key, queried_height) .into_storage_result()? 
{ (Some(value), _gas) => { let proof = if request.prove { let proof = ctx - .wl_storage - .storage + .state .get_existence_proof(&storage_key, &value, queried_height) .into_storage_result()?; Some(proof) @@ -430,8 +424,7 @@ where (None, _gas) => { let proof = if request.prove { let proof = ctx - .wl_storage - .storage + .state .get_non_existence_proof(&storage_key, queried_height) .into_storage_result()?; Some(proof) @@ -458,7 +451,7 @@ where { require_latest_height(&ctx, request)?; - let iter = namada_storage::iter_prefix_bytes(ctx.wl_storage, &storage_key)?; + let iter = namada_storage::iter_prefix_bytes(ctx.state, &storage_key)?; let data: namada_storage::Result> = iter .map(|iter_result| { let (key, value) = iter_result?; @@ -469,7 +462,7 @@ where let proof = if request.prove { let queried_height = { let last_committed_height = - ctx.wl_storage.storage.get_last_block_height(); + ctx.state.in_mem().get_last_block_height(); let height: BlockHeight = request.height.into(); let is_last_height_query = height.0 == 0; @@ -483,8 +476,7 @@ where let mut ops = vec![]; for PrefixValue { key, value } in &data { let mut proof = ctx - .wl_storage - .storage + .state .get_existence_proof(key, value, queried_height) .into_storage_result()?; ops.append(&mut proof.ops); @@ -511,7 +503,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let data = StorageRead::has_key(ctx.wl_storage, &storage_key)?; + let data = StorageRead::has_key(ctx.state, &storage_key)?; Ok(data) } @@ -607,11 +599,11 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let account_exists = namada_account::exists(ctx.wl_storage, &owner)?; + let account_exists = namada_account::exists(ctx.state, &owner)?; if account_exists { - let public_keys = namada_account::public_keys(ctx.wl_storage, &owner)?; - let threshold = namada_account::threshold(ctx.wl_storage, &owner)?; + let public_keys = 
namada_account::public_keys(ctx.state, &owner)?; + let threshold = namada_account::threshold(ctx.state, &owner)?; Ok(Some(Account { public_keys_map: AccountPublicKeysMap::from_iter(public_keys), @@ -631,14 +623,14 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let public_keys = namada_account::public_keys(ctx.wl_storage, &owner)?; + let public_keys = namada_account::public_keys(ctx.state, &owner)?; Ok(!public_keys.is_empty()) } #[cfg(test)] mod test { - use namada_core::types::address; + use namada_core::address; use namada_token::storage_key::balance_key; use crate::queries::RPC; diff --git a/crates/sdk/src/queries/shell/eth_bridge.rs b/crates/sdk/src/queries/shell/eth_bridge.rs index 92ee5f3ca8..766d191427 100644 --- a/crates/sdk/src/queries/shell/eth_bridge.rs +++ b/crates/sdk/src/queries/shell/eth_bridge.rs @@ -6,20 +6,17 @@ use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; -use namada_core::hints; -use namada_core::types::address::Address; -use namada_core::types::eth_abi::{Encode, EncodeCell}; -use namada_core::types::eth_bridge_pool::{ - PendingTransfer, PendingTransferAppendix, -}; -use namada_core::types::ethereum_events::{ +use namada_core::address::Address; +use namada_core::eth_abi::{Encode, EncodeCell}; +use namada_core::eth_bridge_pool::{PendingTransfer, PendingTransferAppendix}; +use namada_core::ethereum_events::{ EthAddress, EthereumEvent, TransferToEthereum, }; -use namada_core::types::ethereum_structs; -use namada_core::types::keccak::KeccakHash; -use namada_core::types::storage::{BlockHeight, DbKeySeg, Epoch, Key}; -use namada_core::types::token::Amount; -use namada_core::types::voting_power::FractionalVotingPower; +use namada_core::keccak::KeccakHash; +use namada_core::storage::{BlockHeight, DbKeySeg, Epoch, Key}; +use namada_core::token::Amount; +use namada_core::voting_power::FractionalVotingPower; +use namada_core::{ethereum_structs, 
hints}; use namada_ethereum_bridge::protocol::transactions::votes::{ EpochedVotingPower, EpochedVotingPowerExt, }; @@ -34,7 +31,7 @@ use namada_ethereum_bridge::storage::{ use namada_proof_of_stake::pos_queries::PosQueries; use namada_state::MembershipProof::BridgePool; use namada_state::{DBIter, StorageHasher, StoreRef, StoreType, DB}; -use namada_storage::{self, CustomError, ResultExt, StorageRead}; +use namada_storage::{CustomError, ResultExt, StorageRead}; use namada_vote_ext::validator_set_update::{ ValidatorSetArgs, VotingPowersMap, }; @@ -233,16 +230,15 @@ where } let mut status = TransferToEthereumStatus { - queried_height: ctx.wl_storage.storage.get_last_block_height(), + queried_height: ctx.state.in_mem().get_last_block_height(), ..Default::default() }; // check which transfers in the Bridge pool match the requested hashes let merkle_tree = ctx - .wl_storage - .storage + .state .get_merkle_tree( - ctx.wl_storage.storage.get_last_block_height(), + ctx.state.in_mem().get_last_block_height(), Some(StoreType::BridgePool), ) .expect("We should always be able to read the database"); @@ -341,7 +337,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let ethbridge_queries = ctx.wl_storage.ethbridge_queries(); + let ethbridge_queries = ctx.state.ethbridge_queries(); let whitelisted = ethbridge_queries.is_token_whitelisted(&asset); let supply = ethbridge_queries @@ -366,10 +362,10 @@ where H: 'static + StorageHasher + Sync, T: BorshDeserialize, { - let Some(contract) = StorageRead::read(ctx.wl_storage, key)? else { + let Some(contract) = StorageRead::read(ctx.state, key)? 
else { return Err(namada_storage::Error::SimpleMessage( - "Failed to read contract: The Ethereum bridge \ - storage is not initialized", + "Failed to read contract: The Ethereum bridge storage is not \ + initialized", )); }; Ok(contract) @@ -411,7 +407,7 @@ where H: 'static + StorageHasher + Sync, { Ok(read_ethereum_bridge_pool_at_height( - ctx.wl_storage.storage.get_last_block_height(), + ctx.state.in_mem().get_last_block_height(), ctx, )) } @@ -427,7 +423,7 @@ where { // get the latest signed merkle root of the Ethereum bridge pool let (_, height) = ctx - .wl_storage + .state .ethbridge_queries() .get_signed_bridge_pool_root() .ok_or(namada_storage::Error::SimpleMessage( @@ -449,8 +445,7 @@ where // get the backing store of the merkle tree corresponding // at the specified height. let merkle_tree = ctx - .wl_storage - .storage + .state .get_merkle_tree(height, Some(StoreType::BridgePool)) .expect("We should always be able to read the database"); let stores = merkle_tree.stores(); @@ -463,9 +458,8 @@ where .keys() .map(|hash| { let value = ctx - .wl_storage - .storage - .read_with_height(&get_key_from_hash(hash), height) + .state + .db_read_with_height(&get_key_from_hash(hash), height) .unwrap() .0 .unwrap(); @@ -493,7 +487,7 @@ where { // get the latest signed merkle root of the Ethereum bridge pool let (signed_root, height) = ctx - .wl_storage + .state .ethbridge_queries() .get_signed_bridge_pool_root() .ok_or(namada_storage::Error::SimpleMessage( @@ -505,7 +499,7 @@ where // make sure a relay attempt won't happen before the new signed // root has had time to be generated let latest_bp_nonce = - ctx.wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + ctx.state.ethbridge_queries().get_bridge_pool_nonce(); if latest_bp_nonce != signed_root.data.1 { return Err(namada_storage::Error::Custom(CustomError( format!( @@ -519,8 +513,7 @@ where // get the merkle tree corresponding to the above root. 
let tree = ctx - .wl_storage - .storage + .state .get_merkle_tree(height, Some(StoreType::BridgePool)) .into_storage_result()?; // from the hashes of the transfers, get the actual values. @@ -529,7 +522,7 @@ where .iter() .filter_map(|hash| { let key = get_key_from_hash(hash); - match ctx.wl_storage.read_bytes(&key) { + match ctx.state.read_bytes(&key) { Ok(Some(bytes)) => Some((key, bytes)), _ => { missing_hashes.push(hash); @@ -568,7 +561,7 @@ where ) { Ok(BridgePool(proof)) => { let (validator_args, voting_powers) = ctx - .wl_storage + .state .ethbridge_queries() .get_bridge_validator_set(None); let relay_proof = ethereum_structs::RelayProof { @@ -619,7 +612,7 @@ where { let mut pending_events = HashMap::new(); for (mut key, value) in ctx - .wl_storage + .state .iter_prefix(ð_msgs_prefix())? .filter_map(|(k, v, _)| { let key = Key::from_str(&k).expect( @@ -640,11 +633,8 @@ where *key.segments.last_mut().unwrap() = DbKeySeg::StringSeg(Keys::segments().seen.into()); // check if the event has been seen - let is_seen = ctx - .wl_storage - .read::(&key) - .into_storage_result()? - .expect( + let is_seen = + ctx.state.read::(&key).into_storage_result()?.expect( "Iterating over storage should not yield keys without values.", ); if is_seen { @@ -658,18 +648,18 @@ where *key.segments.last_mut().unwrap() = DbKeySeg::StringSeg(Keys::segments().voting_power.into()); let voting_power = ctx - .wl_storage + .state .read::(&key) .into_storage_result()? .expect( "Iterating over storage should not yield keys without \ values.", ) - .fractional_stake(ctx.wl_storage); + .fractional_stake(ctx.state); for transfer in transfers { let key = get_key_from_hash(&transfer.keccak256()); let transfer = ctx - .wl_storage + .state .read::(&key) .into_storage_result()? 
.expect("The transfer must be present in storage"); @@ -699,7 +689,7 @@ where .into(), ))); } - let current_epoch = ctx.wl_storage.storage.last_epoch; + let current_epoch = ctx.state.in_mem().last_epoch; if epoch > current_epoch.next() { return Err(namada_storage::Error::Custom(CustomError( format!( @@ -710,7 +700,7 @@ where ))); } - if !ctx.wl_storage.ethbridge_queries().valset_upd_seen(epoch) { + if !ctx.state.ethbridge_queries().valset_upd_seen(epoch) { return Err(namada_storage::Error::Custom(CustomError( format!( "Validator set update proof is not yet available for the \ @@ -722,7 +712,7 @@ where let valset_upd_keys = vote_tallies::Keys::from(&epoch); let proof: EthereumProof = - StorageRead::read(ctx.wl_storage, &valset_upd_keys.body())?.expect( + StorageRead::read(ctx.state, &valset_upd_keys.body())?.expect( "EthereumProof is seen in storage, therefore it must exist", ); @@ -742,7 +732,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let current_epoch = ctx.wl_storage.storage.last_epoch; + let current_epoch = ctx.state.in_mem().last_epoch; if epoch > current_epoch.next() { Err(namada_storage::Error::Custom(CustomError( format!( @@ -753,7 +743,7 @@ where ))) } else { Ok(ctx - .wl_storage + .state .ethbridge_queries() .get_bridge_validator_set(Some(epoch)) .0) @@ -772,7 +762,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let current_epoch = ctx.wl_storage.storage.last_epoch; + let current_epoch = ctx.state.in_mem().last_epoch; if epoch > current_epoch.next() { Err(namada_storage::Error::Custom(CustomError( format!( @@ -783,7 +773,7 @@ where ))) } else { Ok(ctx - .wl_storage + .state .ethbridge_queries() .get_governance_validator_set(Some(epoch)) .0) @@ -800,7 +790,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let maybe_epoch = ctx.wl_storage.pos_queries().get_epoch(height); + let maybe_epoch = 
ctx.state.pos_queries().get_epoch(height); let Some(epoch) = maybe_epoch else { return Err(namada_storage::Error::SimpleMessage( "The epoch of the requested height does not exist", @@ -819,14 +809,14 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let current_epoch = ctx.wl_storage.storage.get_current_epoch().0; + let current_epoch = ctx.state.in_mem().get_current_epoch().0; if epoch > current_epoch + 1u64 { return Err(namada_storage::Error::SimpleMessage( "The requested epoch cannot be queried", )); } let (_, voting_powers) = ctx - .wl_storage + .state .ethbridge_queries() .get_bridge_validator_set(Some(epoch)); Ok(voting_powers) @@ -837,25 +827,18 @@ mod test_ethbridge_router { use std::collections::BTreeMap; use assert_matches::assert_matches; - use namada_core::types::address::nam; - use namada_core::types::address::testing::established_address_1; - use namada_core::types::eth_abi::Encode; - use namada_core::types::eth_bridge_pool::{ - GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, - }; - use namada_core::types::ethereum_events::EthAddress; - use namada_core::types::storage::BlockHeight; - use namada_core::types::voting_power::{ - EthBridgeVotingPower, FractionalVotingPower, + use namada_core::address::testing::{established_address_1, nam}; + use namada_core::eth_bridge_pool::{ + GasFee, TransferToEthereum, TransferToEthereumKind, }; + use namada_core::voting_power::EthBridgeVotingPower; use namada_ethereum_bridge::protocol::transactions::validator_set_update::aggregate_votes; use namada_ethereum_bridge::storage::bridge_pool::{ get_pending_key, get_signed_root_key, BridgePoolTree, }; use namada_ethereum_bridge::storage::proof::BridgePoolRootProof; use namada_ethereum_bridge::storage::whitelist; - use namada_proof_of_stake::pos_queries::PosQueries; - use namada_state::mockdb::MockDBWriteBatch; + use namada_storage::mockdb::MockDBWriteBatch; use namada_storage::StorageWrite; use 
namada_vote_ext::validator_set_update; use namada_vote_ext::validator_set_update::{ @@ -872,16 +855,15 @@ mod test_ethbridge_router { async fn test_read_consensus_valset() { let mut client = TestClient::new(RPC); let epoch = Epoch(0); - assert_eq!(client.wl_storage.storage.last_epoch, epoch); + assert_eq!(client.state.in_mem().last_epoch, epoch); // write validator to storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // commit the changes client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); // check the response @@ -893,13 +875,13 @@ mod test_ethbridge_router { .unwrap(); let expected = { let total_power = client - .wl_storage + .state .pos_queries() .get_total_voting_power(Some(epoch)) .into(); let voting_powers_map: VotingPowersMap = client - .wl_storage + .state .ethbridge_queries() .get_consensus_eth_addresses(Some(epoch)) .iter() @@ -932,16 +914,15 @@ mod test_ethbridge_router { #[tokio::test] async fn test_read_consensus_valset_too_far_ahead() { let mut client = TestClient::new(RPC); - assert_eq!(client.wl_storage.storage.last_epoch.0, 0); + assert_eq!(client.state.in_mem().last_epoch.0, 0); // write validator to storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // commit the changes client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); // check the response @@ -965,10 +946,10 @@ mod test_ethbridge_router { #[tokio::test] async fn test_read_valset_upd_proof() { let mut client = TestClient::new(RPC); - assert_eq!(client.wl_storage.storage.last_epoch.0, 0); + assert_eq!(client.state.in_mem().last_epoch.0, 0); // write validator to storage - let keys = test_utils::init_default_storage(&mut client.wl_storage); + let keys = 
test_utils::init_default_storage(&mut client.state); // write proof to storage let vext = validator_set_update::Vext { @@ -983,7 +964,7 @@ mod test_ethbridge_router { .eth_bridge, ); let tx_result = aggregate_votes( - &mut client.wl_storage, + &mut client.state, validator_set_update::VextDigest::singleton(vext.clone()), 0.into(), ) @@ -992,9 +973,8 @@ mod test_ethbridge_router { // commit the changes client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); // check the response @@ -1009,7 +989,7 @@ mod test_ethbridge_router { EthereumProof::new((1.into(), vext.0.data.voting_powers)); proof.attach_signature( client - .wl_storage + .state .ethbridge_queries() .get_eth_addr_book(&established_address_1(), Some(0.into())) .expect("Test failed"), @@ -1026,16 +1006,15 @@ mod test_ethbridge_router { #[tokio::test] async fn test_read_valset_upd_proof_too_far_ahead() { let mut client = TestClient::new(RPC); - assert_eq!(client.wl_storage.storage.last_epoch.0, 0); + assert_eq!(client.state.in_mem().last_epoch.0, 0); // write validator to storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // commit the changes client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); // check the response @@ -1076,15 +1055,15 @@ mod test_ethbridge_router { }; // write a transfer into the bridge pool - client.wl_storage.storage.block.height = 1.into(); + client.state.in_mem_mut().block.height = 1.into(); client - .wl_storage + .state .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; // 
check the response let pool = RPC @@ -1118,29 +1097,29 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client - .wl_storage + .state .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; // update the pool client - .wl_storage + .state .delete(&get_pending_key(&transfer)) .expect("Test failed"); let mut transfer2 = transfer; transfer2.transfer.amount = 1.into(); client - .wl_storage + .state .write(&get_pending_key(&transfer2), &transfer2) .expect("Test failed"); // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; // check the response let pool = RPC @@ -1173,11 +1152,11 @@ mod test_ethbridge_router { }; // write validator to storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // write a transfer into the bridge pool client - .wl_storage + .state .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); @@ -1186,23 +1165,23 @@ mod test_ethbridge_router { signatures: Default::default(), data: (transfer.keccak256(), 0.into()), }; - let written_height = client.wl_storage.storage.block.height; + let written_height = client.state.in_mem().block.height; // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; // update the pool let mut transfer2 = transfer.clone(); transfer2.transfer.amount = 1.into(); client - 
.wl_storage + .state .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client - .wl_storage + .state .write( &get_signed_root_key(), (signed_root.clone(), written_height), @@ -1210,8 +1189,8 @@ mod test_ethbridge_router { .expect("Test failed"); // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; let resp = RPC .shell() @@ -1241,7 +1220,7 @@ mod test_ethbridge_router { .expect("Test failed"); let (validator_args, voting_powers) = client - .wl_storage + .state .ethbridge_queries() .get_bridge_validator_set(None); let relay_proof = ethereum_structs::RelayProof { @@ -1283,11 +1262,11 @@ mod test_ethbridge_router { }, }; // write validator to storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // write a transfer into the bridge pool client - .wl_storage + .state .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); @@ -1299,33 +1278,31 @@ mod test_ethbridge_router { // commit the changes and increase block height client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.in_mem_mut().block.height += 1; // update the pool let mut transfer2 = transfer; transfer2.transfer.amount = 1.into(); client - .wl_storage + .state .write(&get_pending_key(&transfer2), &transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client - .wl_storage + .state .write(&get_signed_root_key(), (signed_root, BlockHeight::from(0))) .expect("Test failed"); // commit the changes and increase block height client - .wl_storage - .storage - 
.commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.in_mem_mut().block.height += 1; // this is in the pool, but its merkle root has not been signed yet let resp = RPC @@ -1369,11 +1346,11 @@ mod test_ethbridge_router { }, }; // write validator to storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // write a transfer into the bridge pool client - .wl_storage + .state .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); @@ -1382,29 +1359,29 @@ mod test_ethbridge_router { signatures: Default::default(), data: (transfer.keccak256(), 0.into()), }; - let written_height = client.wl_storage.storage.block.height; + let written_height = client.state.in_mem().block.height; // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; // update the pool let mut transfer2 = transfer.clone(); transfer2.transfer.amount = 1.into(); client - .wl_storage + .state .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client - .wl_storage + .state .write(&get_signed_root_key(), (signed_root, written_height)) .expect("Test failed"); // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; let resp = RPC .shell() .eth_bridge() @@ -1435,16 +1412,16 @@ mod test_ethbridge_router { }; // write validator to storage let (_, dummy_validator_stake) = test_utils::default_validator(); - test_utils::init_default_storage(&mut 
client.wl_storage); + test_utils::init_default_storage(&mut client.state); // write a transfer into the bridge pool client - .wl_storage + .state .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); - let event_transfer: namada_core::types::ethereum_events::TransferToEthereum - = (&transfer).into(); + let event_transfer: namada_core::ethereum_events::TransferToEthereum = + (&transfer).into(); let eth_event = EthereumEvent::TransfersToEthereum { nonce: Default::default(), transfers: vec![event_transfer.clone()], @@ -1453,11 +1430,11 @@ mod test_ethbridge_router { let eth_msg_key = vote_tallies::Keys::from(ð_event); let voting_power = FractionalVotingPower::HALF; client - .wl_storage + .state .write(ð_msg_key.body(), eth_event) .expect("Test failed"); client - .wl_storage + .state .write( ð_msg_key.voting_power(), EpochedVotingPower::from([( @@ -1467,32 +1444,30 @@ mod test_ethbridge_router { ) .expect("Test failed"); client - .wl_storage + .state .write(ð_msg_key.seen(), false) .expect("Test failed"); // commit the changes and increase block height client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.in_mem_mut().block.height += 1; // update the pool let mut transfer2 = transfer.clone(); transfer2.transfer.amount = 1.into(); client - .wl_storage + .state .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // commit the changes and increase block height client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.in_mem_mut().block.height += 1; let resp = RPC .shell() .eth_bridge() @@ -1513,7 +1488,7 @@ mod test_ethbridge_router { async fn test_cannot_get_proof_for_removed_transfer() { let mut client = TestClient::new(RPC); // write 
validator to storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); let transfer = PendingTransfer { transfer: TransferToEthereum { kind: TransferToEthereumKind::Erc20, @@ -1531,7 +1506,7 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client - .wl_storage + .state .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); @@ -1540,29 +1515,29 @@ mod test_ethbridge_router { signatures: Default::default(), data: (transfer.keccak256(), 0.into()), }; - let written_height = client.wl_storage.storage.block.height; + let written_height = client.state.in_mem().block.height; // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; // update the pool let mut transfer2 = transfer.clone(); transfer2.transfer.amount = 1.into(); client - .wl_storage + .state .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client - .wl_storage + .state .write(&get_signed_root_key(), (signed_root, written_height)) .expect("Test failed"); // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; // this was in the pool, covered by an old signed Merkle root. let resp = RPC .shell() @@ -1585,7 +1560,7 @@ mod test_ethbridge_router { // remove a transfer from the pool. 
client - .wl_storage + .state .delete(&get_pending_key(&transfer)) .expect("Test failed"); @@ -1617,10 +1592,10 @@ mod test_ethbridge_router { const ERC20_TOKEN: EthAddress = EthAddress([0; 20]); let mut client = TestClient::new(RPC); - assert_eq!(client.wl_storage.storage.last_epoch.0, 0); + assert_eq!(client.state.in_mem().last_epoch.0, 0); // initialize storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // check supply - should be 0 let result = RPC @@ -1642,7 +1617,7 @@ mod test_ethbridge_router { } .into(); client - .wl_storage + .state .write(&key, supply_amount) .expect("Test failed"); let key = whitelist::Key { @@ -1650,10 +1625,7 @@ mod test_ethbridge_router { suffix: whitelist::KeyType::Cap, } .into(); - client - .wl_storage - .write(&key, cap_amount) - .expect("Test failed"); + client.state.write(&key, cap_amount).expect("Test failed"); // check that the supply was updated let result = RPC @@ -1689,7 +1661,7 @@ mod test_ethbridge_router { }, }; client - .wl_storage + .state .write(&get_pending_key(&transfer), transfer.clone()) .expect("Test failed"); @@ -1718,7 +1690,7 @@ mod test_ethbridge_router { transfer4.transfer.amount = 3.into(); // change block height - client.wl_storage.storage.block.height = 1.into(); + client.state.in_mem_mut().block.height = 1.into(); // write bridge pool signed root { @@ -1726,20 +1698,19 @@ mod test_ethbridge_router { signatures: Default::default(), data: (KeccakHash([0; 32]), 0.into()), }; - let written_height = client.wl_storage.storage.block.height; + let written_height = client.state.in_mem().block.height; client - .wl_storage + .state .write(&get_signed_root_key(), (signed_root, written_height)) .expect("Test failed"); client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); } // commit storage changes - client.wl_storage.commit_block().expect("Test failed"); + 
client.state.commit_block().expect("Test failed"); // check transfer statuses let status = RPC @@ -1791,7 +1762,8 @@ mod test_ethbridge_router { #[cfg(any(feature = "testing", test))] #[allow(dead_code)] mod test_utils { - use namada_core::types::address::Address; + use namada_core::address::Address; + #[allow(unused_imports)] pub use namada_ethereum_bridge::test_utils::*; /// An established user address for testing & development diff --git a/crates/sdk/src/queries/types.rs b/crates/sdk/src/queries/types.rs index 614a717a8e..b191cb59b5 100644 --- a/crates/sdk/src/queries/types.rs +++ b/crates/sdk/src/queries/types.rs @@ -1,7 +1,7 @@ use std::fmt::Debug; -use namada_core::types::storage::BlockHeight; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_core::storage::BlockHeight; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use thiserror::Error; use crate::events::log::EventLog; @@ -15,8 +15,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - /// Reference to the ledger's [`WlStorage`]. - pub wl_storage: &'shell WlStorage, + /// Reference to the ledger's [`WlState`]. + pub state: &'shell WlState, /// Log of events emitted by `FinalizeBlock` ABCI calls. pub event_log: &'shell EventLog, /// Cache of VP wasm compiled artifacts. 
diff --git a/crates/sdk/src/queries/vp/governance.rs b/crates/sdk/src/queries/vp/governance.rs index 60de9a3835..2e41282e43 100644 --- a/crates/sdk/src/queries/vp/governance.rs +++ b/crates/sdk/src/queries/vp/governance.rs @@ -24,7 +24,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::storage::get_proposal_by_id(ctx.wl_storage, id) + namada_governance::storage::get_proposal_by_id(ctx.state, id) } /// Query all the votes for the given proposal id @@ -36,7 +36,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::storage::get_proposal_votes(ctx.wl_storage, id) + namada_governance::storage::get_proposal_votes(ctx.state, id) } /// Get the governance parameters @@ -47,7 +47,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::storage::get_parameters(ctx.wl_storage) + namada_governance::storage::get_parameters(ctx.state) } /// Get the governance proposal result stored in storage @@ -59,5 +59,5 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::storage::get_proposal_result(ctx.wl_storage, id) + namada_governance::storage::get_proposal_result(ctx.state, id) } diff --git a/crates/sdk/src/queries/vp/pgf.rs b/crates/sdk/src/queries/vp/pgf.rs index 4b8431e854..7a4dbf0673 100644 --- a/crates/sdk/src/queries/vp/pgf.rs +++ b/crates/sdk/src/queries/vp/pgf.rs @@ -1,4 +1,4 @@ -use namada_core::types::address::Address; +use namada_core::address::Address; use namada_governance::pgf::parameters::PgfParameters; use namada_governance::pgf::storage::steward::StewardDetail; use namada_governance::storage::proposal::StoragePgfFunding; @@ -22,7 +22,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::pgf::storage::get_stewards(ctx.wl_storage) + 
namada_governance::pgf::storage::get_stewards(ctx.state) } /// Check if an address is a pgf steward @@ -34,7 +34,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::pgf::storage::is_steward(ctx.wl_storage, &address) + namada_governance::pgf::storage::is_steward(ctx.state, &address) } /// Query the continuous pgf fundings @@ -45,7 +45,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::pgf::storage::get_payments(ctx.wl_storage) + namada_governance::pgf::storage::get_payments(ctx.state) } /// Query the PGF parameters @@ -56,5 +56,5 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::pgf::storage::get_parameters(ctx.wl_storage) + namada_governance::pgf::storage::get_parameters(ctx.state) } diff --git a/crates/sdk/src/queries/vp/pos.rs b/crates/sdk/src/queries/vp/pos.rs index 47a7f33cf9..19235566aa 100644 --- a/crates/sdk/src/queries/vp/pos.rs +++ b/crates/sdk/src/queries/vp/pos.rs @@ -3,10 +3,10 @@ use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use namada_core::types::address::Address; -use namada_core::types::key::common; -use namada_core::types::storage::Epoch; -use namada_core::types::token; +use namada_core::address::Address; +use namada_core::key::common; +use namada_core::storage::Epoch; +use namada_core::token; use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::queries::{ find_delegation_validators, find_delegations, @@ -29,7 +29,7 @@ use namada_proof_of_stake::types::{ BondId, BondsAndUnbondsDetail, BondsAndUnbondsDetails, CommissionPair, Slash, ValidatorMetaData, ValidatorState, WeightedValidator, }; -use namada_proof_of_stake::{self, bond_amount, query_reward_tokens}; +use namada_proof_of_stake::{bond_amount, query_reward_tokens}; use namada_state::{DBIter, 
StorageHasher, DB}; use namada_storage::collections::lazy_map; use namada_storage::OptionExt; @@ -184,7 +184,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - read_pos_params(ctx.wl_storage) + read_pos_params(ctx.state) } /// Find if the given address belongs to a validator account. @@ -196,7 +196,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_proof_of_stake::is_validator(ctx.wl_storage, &addr) + namada_proof_of_stake::is_validator(ctx.state, &addr) } /// Find a consensus key of a validator account. @@ -208,9 +208,9 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let current_epoch = ctx.wl_storage.storage.last_epoch; + let current_epoch = ctx.state.in_mem().last_epoch; namada_proof_of_stake::storage::get_consensus_key( - ctx.wl_storage, + ctx.state, &addr, current_epoch, ) @@ -226,7 +226,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_proof_of_stake::is_delegator(ctx.wl_storage, &addr, epoch) + namada_proof_of_stake::is_delegator(ctx.state, &addr, epoch) } /// Get all the validator known addresses. 
These validators may be in any state, @@ -239,8 +239,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - read_all_validator_addresses(ctx.wl_storage, epoch) + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + read_all_validator_addresses(ctx.state, epoch) } /// Get the validator commission rate and max commission rate change per epoch @@ -253,15 +253,12 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - let params = read_pos_params(ctx.wl_storage)?; - let commission_rate = validator_commission_rate_handle(&validator).get( - ctx.wl_storage, - epoch, - ¶ms, - )?; + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + let params = read_pos_params(ctx.state)?; + let commission_rate = validator_commission_rate_handle(&validator) + .get(ctx.state, epoch, ¶ms)?; let max_commission_change_per_epoch = - read_validator_max_commission_rate_change(ctx.wl_storage, &validator)?; + read_validator_max_commission_rate_change(ctx.state, &validator)?; match (commission_rate, max_commission_change_per_epoch) { (Some(commission_rate), Some(max_commission_change_per_epoch)) => { @@ -283,12 +280,11 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let email = read_validator_email(ctx.wl_storage, &validator)?; - let description = read_validator_description(ctx.wl_storage, &validator)?; - let website = read_validator_website(ctx.wl_storage, &validator)?; - let discord_handle = - read_validator_discord_handle(ctx.wl_storage, &validator)?; - let avatar = read_validator_avatar(ctx.wl_storage, &validator)?; + let email = read_validator_email(ctx.state, &validator)?; + let description = read_validator_description(ctx.state, &validator)?; + let website = read_validator_website(ctx.state, &validator)?; 
+ let discord_handle = read_validator_discord_handle(ctx.state, &validator)?; + let avatar = read_validator_avatar(ctx.state, &validator)?; // Email is the only required field for a validator in storage match email { @@ -313,13 +309,10 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - let params = read_pos_params(ctx.wl_storage)?; - let state = validator_state_handle(&validator).get( - ctx.wl_storage, - epoch, - ¶ms, - )?; + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + let params = read_pos_params(ctx.state)?; + let state = + validator_state_handle(&validator).get(ctx.state, epoch, ¶ms)?; Ok(state) } @@ -332,7 +325,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - read_validator_last_slash_epoch(ctx.wl_storage, &validator) + read_validator_last_slash_epoch(ctx.state, &validator) } /// Get the total stake of a validator at the given epoch or current when @@ -349,11 +342,11 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - let params = read_pos_params(ctx.wl_storage)?; - if namada_proof_of_stake::is_validator(ctx.wl_storage, &validator)? { + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + let params = read_pos_params(ctx.state)?; + if namada_proof_of_stake::is_validator(ctx.state, &validator)? { let stake = - read_validator_stake(ctx.wl_storage, ¶ms, &validator, epoch)?; + read_validator_stake(ctx.state, ¶ms, &validator, epoch)?; Ok(Some(stake)) } else { Ok(None) @@ -372,7 +365,7 @@ where H: 'static + StorageHasher + Sync, { let handle = validator_incoming_redelegations_handle(&src_validator); - handle.get(ctx.wl_storage, &delegator) + handle.get(ctx.state, &delegator) } /// Get all the validator in the consensus set with their bonded stake. 
@@ -384,8 +377,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - read_consensus_validator_set_addresses_with_stake(ctx.wl_storage, epoch) + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + read_consensus_validator_set_addresses_with_stake(ctx.state, epoch) } /// Get all the validator in the below-capacity set with their bonded stake. @@ -397,11 +390,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - read_below_capacity_validator_set_addresses_with_stake( - ctx.wl_storage, - epoch, - ) + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + read_below_capacity_validator_set_addresses_with_stake(ctx.state, epoch) } /// Get the total stake in PoS system at the given epoch or current when `None`. @@ -413,9 +403,9 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - let params = read_pos_params(ctx.wl_storage)?; - read_total_stake(ctx.wl_storage, ¶ms, epoch) + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + let params = read_pos_params(ctx.state)?; + read_total_stake(ctx.state, ¶ms, epoch) } fn bond_deltas( @@ -427,7 +417,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - bond_handle(&source, &validator).to_hashmap(ctx.wl_storage) + bond_handle(&source, &validator).to_hashmap(ctx.state) } /// Find the sum of bond amount up the given epoch when `Some`, or up to the @@ -442,13 +432,13 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let params = read_pos_params(ctx.wl_storage)?; - let epoch = epoch - .unwrap_or(ctx.wl_storage.storage.last_epoch + params.pipeline_len); + let params = 
read_pos_params(ctx.state)?; + let epoch = + epoch.unwrap_or(ctx.state.in_mem().last_epoch + params.pipeline_len); let handle = bond_handle(&source, &validator); handle - .get_sum(ctx.wl_storage, epoch, ¶ms)? + .get_sum(ctx.state, epoch, ¶ms)? .ok_or_err_msg("Cannot find bond") } @@ -462,10 +452,10 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); let bond_id = BondId { source, validator }; - bond_amount(ctx.wl_storage, &bond_id, epoch) + bond_amount(ctx.state, &bond_id, epoch) } fn unbond( @@ -478,7 +468,7 @@ where H: 'static + StorageHasher + Sync, { let handle = unbond_handle(&source, &validator); - let iter = handle.iter(ctx.wl_storage)?; + let iter = handle.iter(ctx.state)?; iter.map(|next_result| { next_result.map( |( @@ -504,7 +494,7 @@ where { // TODO slashes let handle = unbond_handle(&source, &validator); - let iter = handle.iter(ctx.wl_storage)?; + let iter = handle.iter(ctx.state)?; iter.map(|next_result| { next_result.map( |( @@ -529,11 +519,11 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); let handle = unbond_handle(&source, &validator); let mut total = token::Amount::zero(); - for result in handle.iter(ctx.wl_storage)? { + for result in handle.iter(ctx.state)? 
{ let ( lazy_map::NestedSubKey::Data { key: end, @@ -557,13 +547,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let current_epoch = ctx.wl_storage.storage.last_epoch; - query_reward_tokens( - ctx.wl_storage, - source.as_ref(), - &validator, - current_epoch, - ) + let current_epoch = ctx.state.in_mem().last_epoch; + query_reward_tokens(ctx.state, source.as_ref(), &validator, current_epoch) } fn bonds_and_unbonds( @@ -576,9 +561,7 @@ where H: 'static + StorageHasher + Sync, { namada_proof_of_stake::queries::bonds_and_unbonds( - ctx.wl_storage, - source, - validator, + ctx.state, source, validator, ) } @@ -592,7 +575,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - find_delegation_validators(ctx.wl_storage, &owner) + find_delegation_validators(ctx.state, &owner) } /// Find all the validator addresses to whom the given `owner` address has @@ -606,8 +589,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - find_delegations(ctx.wl_storage, &owner, &epoch) + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + find_delegations(ctx.state, &owner, &epoch) } /// Validator slashes @@ -620,7 +603,7 @@ where H: 'static + StorageHasher + Sync, { let slash_handle = validator_slashes_handle(&validator); - slash_handle.iter(ctx.wl_storage)?.collect() + slash_handle.iter(ctx.state)?.collect() } /// All slashes @@ -631,7 +614,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - find_all_slashes(ctx.wl_storage) + find_all_slashes(ctx.state) } /// Enqueued slashes @@ -642,8 +625,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let current_epoch = ctx.wl_storage.storage.last_epoch; - find_all_enqueued_slashes(ctx.wl_storage, current_epoch) + let current_epoch = 
ctx.state.in_mem().last_epoch; + find_all_enqueued_slashes(ctx.state, current_epoch) } /// Native validator address by looking up the Tendermint address @@ -656,8 +639,7 @@ where H: 'static + StorageHasher + Sync, { namada_proof_of_stake::storage::find_validator_by_raw_hash( - ctx.wl_storage, - tm_addr, + ctx.state, tm_addr, ) } @@ -669,7 +651,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_proof_of_stake::storage::get_consensus_key_set(ctx.wl_storage) + namada_proof_of_stake::storage::get_consensus_key_set(ctx.state) } /// Find if the given source address has any bonds. @@ -681,7 +663,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_proof_of_stake::queries::has_bonds(ctx.wl_storage, &source) + namada_proof_of_stake::queries::has_bonds(ctx.state, &source) } /// Client-only methods for the router type are composed from router functions. diff --git a/crates/sdk/src/queries/vp/token.rs b/crates/sdk/src/queries/vp/token.rs index fb77a396a4..cf6e1b9c42 100644 --- a/crates/sdk/src/queries/vp/token.rs +++ b/crates/sdk/src/queries/vp/token.rs @@ -1,7 +1,7 @@ //! 
Token validity predicate queries -use namada_core::types::address::Address; -use namada_core::types::token; +use namada_core::address::Address; +use namada_core::token; use namada_state::{DBIter, StorageHasher, DB}; use namada_token::{read_denom, read_total_supply}; @@ -22,7 +22,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - read_denom(ctx.wl_storage, &addr) + read_denom(ctx.state, &addr) } /// Get the total supply for a token address @@ -34,14 +34,14 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - read_total_supply(ctx.wl_storage, &addr) + read_total_supply(ctx.state, &addr) } #[cfg(any(test, feature = "async-client"))] pub mod client_only_methods { use borsh::BorshDeserialize; - use namada_core::types::address::Address; - use namada_core::types::token; + use namada_core::address::Address; + use namada_core::token; use namada_token::storage_key::balance_key; use super::Token; diff --git a/crates/sdk/src/rpc.rs b/crates/sdk/src/rpc.rs index b0d2b9994d..9b7de172fb 100644 --- a/crates/sdk/src/rpc.rs +++ b/crates/sdk/src/rpc.rs @@ -10,16 +10,16 @@ use masp_primitives::asset_type::AssetType; use masp_primitives::merkle_tree::MerklePath; use masp_primitives::sapling::Node; use namada_account::Account; -use namada_core::types::address::{Address, InternalAddress}; -use namada_core::types::hash::Hash; -use namada_core::types::key::common; -use namada_core::types::storage::{ +use namada_core::address::{Address, InternalAddress}; +use namada_core::hash::Hash; +use namada_core::key::common; +use namada_core::storage::{ BlockHeight, BlockResults, Epoch, Key, PrefixValue, }; -use namada_core::types::token::{ +use namada_core::token::{ Amount, DenominatedAmount, Denomination, MaspDigitPos, }; -use namada_core::types::{storage, token}; +use namada_core::{storage, token}; use namada_governance::parameters::GovernanceParameters; use 
namada_governance::pgf::parameters::PgfParameters; use namada_governance::pgf::storage::steward::StewardDetail; @@ -685,7 +685,7 @@ pub async fn query_tx_response( // Get the block results corresponding to a block to which // the specified transaction belongs let block = &blocks - .get(0) + .first() .ok_or_else(|| { TError::server( "Unable to find a block applying the given transaction" diff --git a/crates/sdk/src/signing.rs b/crates/sdk/src/signing.rs index 57216c15d1..6b4b7e7357 100644 --- a/crates/sdk/src/signing.rs +++ b/crates/sdk/src/signing.rs @@ -12,17 +12,15 @@ use masp_primitives::transaction::components::sapling::fees::{ }; use masp_primitives::transaction::Transaction; use namada_account::{AccountPublicKeysMap, InitAccount, UpdateAccount}; -use namada_core::types::address::{ - Address, ImplicitAddress, InternalAddress, MASP, -}; -use namada_core::types::key::*; -use namada_core::types::masp::{AssetData, ExtendedViewingKey, PaymentAddress}; -use namada_core::types::sign::SignatureIndex; -use namada_core::types::storage::Epoch; -use namada_core::types::token; -use namada_core::types::token::Transfer; -// use namada_core::types::storage::Key; -use namada_core::types::token::{Amount, DenominatedAmount}; +use namada_core::address::{Address, ImplicitAddress, InternalAddress, MASP}; +use namada_core::key::*; +use namada_core::masp::{AssetData, ExtendedViewingKey, PaymentAddress}; +use namada_core::sign::SignatureIndex; +use namada_core::storage::Epoch; +use namada_core::token; +use namada_core::token::Transfer; +// use namada_core::storage::Key; +use namada_core::token::{Amount, DenominatedAmount}; use namada_governance::storage::proposal::{ InitProposalData, ProposalType, VoteProposalData, }; @@ -42,12 +40,14 @@ use tokio::sync::RwLock; use super::masp::{ShieldedContext, ShieldedTransfer}; use crate::args::SdkTypes; use crate::error::{EncodingError, Error, TxSubmitError}; +use crate::eth_bridge_pool::PendingTransfer; +use 
crate::governance::storage::proposal::{AddRemove, PGFAction, PGFTarget}; use crate::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; use crate::ibc::primitives::proto::Any; use crate::io::*; use crate::rpc::validate_amount; use crate::tx::{ - TX_BECOME_VALIDATOR_WASM, TX_BOND_WASM, TX_BRIDGE_POOL_WASM, + Commitment, TX_BECOME_VALIDATOR_WASM, TX_BOND_WASM, TX_BRIDGE_POOL_WASM, TX_CHANGE_COMMISSION_WASM, TX_CHANGE_CONSENSUS_KEY_WASM, TX_CHANGE_METADATA_WASM, TX_CLAIM_REWARDS_WASM, TX_DEACTIVATE_VALIDATOR_WASM, TX_IBC_WASM, TX_INIT_ACCOUNT_WASM, @@ -56,7 +56,6 @@ use crate::tx::{ TX_UNJAIL_VALIDATOR_WASM, TX_UPDATE_ACCOUNT_WASM, TX_UPDATE_STEWARD_COMMISSION, TX_VOTE_PROPOSAL, TX_WITHDRAW_WASM, }; -use crate::types::eth_bridge_pool::PendingTransfer; pub use crate::wallet::store::AddressVpType; use crate::wallet::{Wallet, WalletIo}; use crate::{args, display_line, rpc, MaybeSend, Namada}; @@ -348,7 +347,7 @@ pub async fn aux_signing_data( match &args.wrapper_fee_payer { Some(keypair) => keypair.clone(), None => public_keys - .get(0) + .first() .ok_or(TxSubmitError::InvalidFeePayer)? .clone(), } @@ -388,7 +387,7 @@ pub async fn init_validator_signing_data( match &args.wrapper_fee_payer { Some(keypair) => keypair.clone(), None => public_keys - .get(0) + .first() .ok_or(TxSubmitError::InvalidFeePayer)? 
.clone(), } @@ -497,7 +496,7 @@ pub async fn validate_fee_and_gen_unshield( Some(diff) if !diff.is_zero() => { if let Some(spending_key) = args.fee_unshield.clone() { // Unshield funds for fee payment - let target = namada_core::types::masp::TransferTarget::Address( + let target = namada_core::masp::TransferTarget::Address( fee_payer_address.clone(), ); let fee_amount = DenominatedAmount::new( @@ -645,7 +644,7 @@ pub async fn wrap_tx( let mut hasher = sha2::Sha256::new(); section.hash(&mut hasher); tx.add_section(section); - namada_core::types::hash::Hash(hasher.finalize().into()) + namada_core::hash::Hash(hasher.finalize().into()) }); tx.add_wrapper( @@ -777,9 +776,6 @@ fn format_outputs(output: &mut Vec) { let key = key.trim().chars().take(MAX_KEY_LEN - 1).collect::(); // Trim value because we will insert spaces later value = value.trim(); - if value.is_empty() { - value = "(none)" - } if value.chars().count() < MAX_VALUE_LEN { // No need to split the line in this case output[pos] = format!("{} | {} : {}", i, key, value); @@ -935,6 +931,139 @@ impl<'a> Display for LedgerProposalType<'a> { } } +fn proposal_type_to_ledger_vector( + proposal_type: &ProposalType, + tx: &Tx, + output: &mut Vec, +) { + match proposal_type { + ProposalType::Default(None) => { + output.push("Proposal type : Default".to_string()) + } + ProposalType::Default(Some(hash)) => { + output.push("Proposal type : Default".to_string()); + let extra = tx + .get_section(hash) + .and_then(|x| Section::extra_data_sec(x.as_ref())) + .expect("unable to load vp code") + .code + .hash(); + output + .push(format!("Proposal hash : {}", HEXLOWER.encode(&extra.0))); + } + ProposalType::PGFSteward(actions) => { + output.push("Proposal type : PGF Steward".to_string()); + let mut actions = actions.iter().collect::>(); + // Print the test vectors in the same order as the serializations + actions.sort(); + for action in actions { + match action { + AddRemove::Add(addr) => { + output.push(format!("Add : {}", addr)) 
+ } + AddRemove::Remove(addr) => { + output.push(format!("Remove : {}", addr)) + } + } + } + } + ProposalType::PGFPayment(actions) => { + output.push("Proposal type : PGF Payment".to_string()); + for action in actions { + match action { + PGFAction::Continuous(AddRemove::Add( + PGFTarget::Internal(target), + )) => { + output.push( + "PGF Action : Add Continuous Payment".to_string(), + ); + output.push(format!("Target: {}", target.target)); + output.push(format!( + "Amount: NAM {}", + to_ledger_decimal( + &target.amount.to_string_native() + ) + )); + } + PGFAction::Continuous(AddRemove::Add(PGFTarget::Ibc( + target, + ))) => { + output.push( + "PGF Action : Add Continuous Payment".to_string(), + ); + output.push(format!("Target: {}", target.target)); + output.push(format!( + "Amount: NAM {}", + to_ledger_decimal( + &target.amount.to_string_native() + ) + )); + output.push(format!("Port ID: {}", target.port_id)); + output + .push(format!("Channel ID: {}", target.channel_id)); + } + PGFAction::Continuous(AddRemove::Remove( + PGFTarget::Internal(target), + )) => { + output.push( + "PGF Action : Remove Continuous Payment" + .to_string(), + ); + output.push(format!("Target: {}", target.target)); + output.push(format!( + "Amount: NAM {}", + to_ledger_decimal( + &target.amount.to_string_native() + ) + )); + } + PGFAction::Continuous(AddRemove::Remove( + PGFTarget::Ibc(target), + )) => { + output.push( + "PGF Action : Remove Continuous Payment" + .to_string(), + ); + output.push(format!("Target: {}", target.target)); + output.push(format!( + "Amount: NAM {}", + to_ledger_decimal( + &target.amount.to_string_native() + ) + )); + output.push(format!("Port ID: {}", target.port_id)); + output + .push(format!("Channel ID: {}", target.channel_id)); + } + PGFAction::Retro(PGFTarget::Internal(target)) => { + output.push("PGF Action : Retro Payment".to_string()); + output.push(format!("Target: {}", target.target)); + output.push(format!( + "Amount: NAM {}", + to_ledger_decimal( + 
&target.amount.to_string_native() + ) + )); + } + PGFAction::Retro(PGFTarget::Ibc(target)) => { + output.push("PGF Action : Retro Payment".to_string()); + output.push(format!("Target: {}", target.target)); + output.push(format!( + "Amount: NAM {}", + to_ledger_decimal( + &target.amount.to_string_native() + ) + )); + output.push(format!("Port ID: {}", target.port_id)); + output + .push(format!("Channel ID: {}", target.channel_id)); + } + } + } + } + } +} + /// Converts the given transaction to the form that is displayed on the Ledger /// device pub async fn to_ledger_vector( @@ -1078,11 +1207,12 @@ pub async fn to_ledger_vector( tv.output.push("Type : Init proposal".to_string()); tv.output.push(format!("ID : {}", init_proposal_data.id)); + proposal_type_to_ledger_vector( + &init_proposal_data.r#type, + tx, + &mut tv.output, + ); tv.output.extend(vec![ - format!( - "Proposal type : {}", - LedgerProposalType(&init_proposal_data.r#type, tx) - ), format!("Author : {}", init_proposal_data.author), format!( "Voting start epoch : {}", @@ -1098,11 +1228,12 @@ pub async fn to_ledger_vector( tv.output_expert .push(format!("ID : {}", init_proposal_data.id)); + proposal_type_to_ledger_vector( + &init_proposal_data.r#type, + tx, + &mut tv.output_expert, + ); tv.output_expert.extend(vec![ - format!( - "Proposal type : {}", - LedgerProposalType(&init_proposal_data.r#type, tx) - ), format!("Author : {}", init_proposal_data.author), format!( "Voting start epoch : {}", @@ -1464,30 +1595,24 @@ pub async fn to_ledger_vector( tv.output.extend(vec!["Type : Change metadata".to_string()]); let mut other_items = vec![]; + other_items.push(format!("Validator : {}", metadata_change.validator)); if let Some(email) = metadata_change.email { - other_items.push(format!("New email : {}", email)); + other_items.push(format!("Email : {}", email)); } if let Some(description) = metadata_change.description { - if description.is_empty() { - other_items.push("Description removed".to_string()); - } else { 
- other_items.push(format!("New description : {}", description)); - } + other_items.push(format!("Description : {}", description)); } if let Some(website) = metadata_change.website { - if website.is_empty() { - other_items.push("Website removed".to_string()); - } else { - other_items.push(format!("New website : {}", website)); - } + other_items.push(format!("Website : {}", website)); } if let Some(discord_handle) = metadata_change.discord_handle { - if discord_handle.is_empty() { - other_items.push("Discord handle removed".to_string()); - } else { - other_items - .push(format!("New discord handle : {}", discord_handle)); - } + other_items.push(format!("Discord handle : {}", discord_handle)); + } + if let Some(avatar) = metadata_change.avatar { + other_items.push(format!("Avatar : {}", avatar)); + } + if let Some(commission_rate) = metadata_change.commission_rate { + other_items.push(format!("Commission rate : {}", commission_rate)); } tv.output.extend(other_items.clone()); @@ -1615,15 +1740,19 @@ pub async fn to_ledger_vector( format!("Type : Update Steward Commission"), format!("Steward : {}", update.steward), ]); - for (address, dec) in &update.commission { - tv.output.push(format!("Commission : {} {}", address, dec)); + let mut commission = update.commission.iter().collect::>(); + // Print the test vectors in the same order as the serializations + commission.sort_by(|(a, _), (b, _)| a.cmp(b)); + for (address, dec) in &commission { + tv.output.push(format!("Validator : {}", address)); + tv.output.push(format!("Commission Rate : {}", dec)); } tv.output_expert .push(format!("Steward : {}", update.steward)); - for (address, dec) in &update.commission { - tv.output_expert - .push(format!("Commission : {} {}", address, dec)); + for (address, dec) in &commission { + tv.output_expert.push(format!("Validator : {}", address)); + tv.output_expert.push(format!("Commission Rate : {}", dec)); } } else if code_sec.tag == Some(TX_RESIGN_STEWARD.to_string()) { let address = 
Address::try_from_slice( @@ -1680,6 +1809,30 @@ pub async fn to_ledger_vector( tv.output.push("Type : Custom".to_string()); } + if tx.memo_sechash() != &namada_core::hash::Hash::default() { + match tx + .get_section(tx.memo_sechash()) + .unwrap() + .extra_data_sec() + .unwrap() + .code + { + Commitment::Hash(hash) => { + tv.output + .push(format!("Memo Hash : {}", HEXLOWER.encode(&hash.0))); + tv.output_expert + .push(format!("Memo Hash : {}", HEXLOWER.encode(&hash.0))); + } + Commitment::Id(id) => { + let memo = String::from_utf8(id).map_err(|err| { + Error::from(EncodingError::Conversion(err.to_string())) + })?; + tv.output.push(format!("Memo : {}", memo)); + tv.output_expert.push(format!("Memo : {}", memo)); + } + } + } + if let Some(wrapper) = tx.header.wrapper() { let fee_amount_per_gas_unit = to_ledger_decimal(&wrapper.fee.amount_per_gas_unit.to_string()); diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index da3e474d2d..dbc8cb0a39 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -19,6 +19,9 @@ use masp_primitives::transaction::components::transparent::fees::{ }; use masp_primitives::transaction::components::I128Sum; use namada_account::{InitAccount, UpdateAccount}; +use namada_core::address::{Address, InternalAddress, MASP}; +use namada_core::dec::Dec; +use namada_core::hash::Hash; use namada_core::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; use namada_core::ibc::apps::transfer::types::packet::PacketData; use namada_core::ibc::apps::transfer::types::PrefixedCoin; @@ -26,15 +29,12 @@ use namada_core::ibc::core::channel::types::timeout::TimeoutHeight; use namada_core::ibc::core::client::types::Height as IbcHeight; use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; use namada_core::ibc::primitives::{Msg, Timestamp as IbcTimestamp}; -use namada_core::types::address::{Address, InternalAddress, MASP}; -use namada_core::types::dec::Dec; -use namada_core::types::hash::Hash; -use 
namada_core::types::ibc::{IbcShieldedTransfer, MsgShieldedTransfer}; -use namada_core::types::key::*; -use namada_core::types::masp::{AssetData, TransferSource, TransferTarget}; -use namada_core::types::storage::Epoch; -use namada_core::types::time::DateTimeUtc; -use namada_core::types::{storage, token}; +use namada_core::ibc::{IbcShieldedTransfer, MsgShieldedTransfer}; +use namada_core::key::*; +use namada_core::masp::{AssetData, TransferSource, TransferTarget}; +use namada_core::storage::Epoch; +use namada_core::time::DateTimeUtc; +use namada_core::{storage, token}; use namada_governance::cli::onchain::{ DefaultProposal, OnChainProposal, PgfFundingProposal, PgfStewardProposal, }; diff --git a/crates/sdk/src/wallet/alias.rs b/crates/sdk/src/wallet/alias.rs index 48ab4a9fa0..b154b7e13a 100644 --- a/crates/sdk/src/wallet/alias.rs +++ b/crates/sdk/src/wallet/alias.rs @@ -7,7 +7,7 @@ use std::io::Read; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::address::{Address, InternalAddress}; +use namada_core::address::{Address, InternalAddress}; use serde::{Deserialize, Serialize}; /// Aliases created from raw strings are kept in-memory as given, but their @@ -87,7 +87,7 @@ impl PartialEq for Alias { impl PartialOrd for Alias { fn partial_cmp(&self, other: &Self) -> Option { - self.normalize().partial_cmp(&other.normalize()) + Some(self.cmp(other)) } } diff --git a/crates/sdk/src/wallet/derivation_path.rs b/crates/sdk/src/wallet/derivation_path.rs index 3210450d26..c6a365cf40 100644 --- a/crates/sdk/src/wallet/derivation_path.rs +++ b/crates/sdk/src/wallet/derivation_path.rs @@ -3,7 +3,7 @@ use std::str::FromStr; use derivation_path::{ChildIndex, DerivationPath as DerivationPathInner}; use masp_primitives::zip32; -use namada_core::types::key::SchemeType; +use namada_core::key::SchemeType; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use thiserror::Error; use tiny_hderive::bip44::{ @@ -64,7 +64,7 @@ impl 
DerivationPath { pub fn is_bip44_conform(&self, strict: bool) -> bool { // check the path conforms the structure: // m / purpose' / coin_type' / account' / change / address_index - let purpose = self.0.as_ref().get(0); + let purpose = self.0.as_ref().first(); let coin_type = self.0.as_ref().get(1); let account = self.0.as_ref().get(2); let change = self.0.as_ref().get(3); @@ -108,7 +108,7 @@ impl DerivationPath { // check the path conforms one of the structure: // m / purpose' / coin_type' / account' // m / purpose' / coin_type' / account' / address_index - let purpose = self.0.as_ref().get(0); + let purpose = self.0.as_ref().first(); let coin_type = self.0.as_ref().get(1); let account = self.0.as_ref().get(2); let address = self.0.as_ref().get(3); @@ -272,7 +272,7 @@ impl From for Vec { #[cfg(test)] mod tests { - use namada_core::types::key::SchemeType; + use namada_core::key::SchemeType; use super::DerivationPath; diff --git a/crates/sdk/src/wallet/keys.rs b/crates/sdk/src/wallet/keys.rs index 64a052dd41..7c94df49d6 100644 --- a/crates/sdk/src/wallet/keys.rs +++ b/crates/sdk/src/wallet/keys.rs @@ -97,8 +97,9 @@ where }) .map_err(D::Error::custom) } else { - Err(DeserializeStoredKeypairError::MissingPrefix) - .map_err(D::Error::custom) + Err(D::Error::custom( + DeserializeStoredKeypairError::MissingPrefix, + )) } } } diff --git a/crates/sdk/src/wallet/mod.rs b/crates/sdk/src/wallet/mod.rs index c6aa765375..4d90279c42 100644 --- a/crates/sdk/src/wallet/mod.rs +++ b/crates/sdk/src/wallet/mod.rs @@ -12,9 +12,9 @@ use std::str::FromStr; use alias::Alias; use bip39::{Language, Mnemonic, MnemonicType, Seed}; use borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::address::Address; -use namada_core::types::key::*; -use namada_core::types::masp::{ +use namada_core::address::Address; +use namada_core::key::*; +use namada_core::masp::{ ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, }; pub use pre_genesis::gen_key_to_store; diff --git 
a/crates/sdk/src/wallet/pre_genesis.rs b/crates/sdk/src/wallet/pre_genesis.rs index abbb918470..dbe1a11571 100644 --- a/crates/sdk/src/wallet/pre_genesis.rs +++ b/crates/sdk/src/wallet/pre_genesis.rs @@ -1,5 +1,5 @@ //! Provides functionality for managing validator keys -use namada_core::types::key::{common, SchemeType}; +use namada_core::key::{common, SchemeType}; use rand::{CryptoRng, Rng}; use serde::{Deserialize, Serialize}; use thiserror::Error; diff --git a/crates/sdk/src/wallet/store.rs b/crates/sdk/src/wallet/store.rs index 37d2dc1e44..068b81a9fd 100644 --- a/crates/sdk/src/wallet/store.rs +++ b/crates/sdk/src/wallet/store.rs @@ -7,13 +7,12 @@ use std::str::FromStr; use bimap::BiBTreeMap; use itertools::Itertools; use masp_primitives::zip32; -use namada_core::types::address::{Address, ImplicitAddress}; -use namada_core::types::key::*; -use namada_core::types::masp::{ +use namada_core::address::{Address, ImplicitAddress}; +use namada_core::key::*; +use namada_core::masp::{ ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, }; use serde::{Deserialize, Serialize}; -use slip10_ed25519; use zeroize::Zeroizing; use super::alias::{self, Alias}; @@ -633,7 +632,7 @@ impl Store { other.store.tendermint_node_key, ), ]; - self.secret_keys.extend(keys.into_iter()); + self.secret_keys.extend(keys); let consensus_pk = other.consensus_key.ref_to(); let tendermint_node_pk = other.tendermint_node_key.ref_to(); @@ -644,15 +643,15 @@ impl Store { tendermint_node_pk.clone(), ), ]; - self.public_keys.extend(public_keys.clone().into_iter()); + self.public_keys.extend(public_keys.clone()); self.addresses - .extend(public_keys.into_iter().map(|(k, v)| (k, (&v).into()))); + .extend(public_keys.map(|(k, v)| (k, (&v).into()))); let pkhs = [ ((&consensus_pk).into(), consensus_key_alias), ((&tendermint_node_pk).into(), tendermint_node_key_alias), ]; - self.pkhs.extend(pkhs.into_iter()); + self.pkhs.extend(pkhs); self.validator_data = Some(ValidatorData { address: 
validator_address, @@ -789,11 +788,10 @@ impl<'de> Deserialize<'de> for AddressVpType { #[cfg(test)] mod test_wallet { - use base58::{self, FromBase58}; + use base58::FromBase58; use bip39::{Language, Mnemonic, Seed}; use data_encoding::HEXLOWER; - use super::super::derivation_path::DerivationPath; use super::*; #[test] diff --git a/crates/shielded_token/Cargo.toml b/crates/shielded_token/Cargo.toml index 87302cd42f..6b73901484 100644 --- a/crates/shielded_token/Cargo.toml +++ b/crates/shielded_token/Cargo.toml @@ -20,17 +20,18 @@ testing = ["multicore", "namada_core/testing"] [dependencies] namada_core = { path = "../core" } namada_parameters = { path = "../parameters" } -namada_state = { path = "../state" } namada_storage = { path = "../storage" } namada_trans_token = { path = "../trans_token" } +borsh.workspace = true masp_primitives.workspace = true rayon = { workspace = true, optional = true } +serde.workspace = true tracing.workspace = true [dev-dependencies] namada_core = { path = "../core", features = ["testing"] } -namada_state = { path = "../state", features = ["testing"] } +namada_storage = { path = "../storage", features = ["testing"] } proptest.workspace = true rayon.workspace = true diff --git a/crates/shielded_token/src/conversion.rs b/crates/shielded_token/src/conversion.rs index 23db438d42..f9f5826271 100644 --- a/crates/shielded_token/src/conversion.rs +++ b/crates/shielded_token/src/conversion.rs @@ -1,14 +1,13 @@ //! 
MASP rewards conversions -use namada_core::ledger::inflation::{ - ShieldedRewardsController, ShieldedValsToUpdate, -}; -use namada_core::types::address::{Address, MASP}; -use namada_core::types::dec::Dec; -use namada_core::types::uint::Uint; +use namada_core::address::{Address, MASP}; +use namada_core::dec::Dec; +use namada_core::uint::Uint; use namada_parameters as parameters; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; use namada_storage::{StorageRead, StorageWrite}; +use namada_trans_token::inflation::{ + ShieldedRewardsController, ShieldedValsToUpdate, +}; use namada_trans_token::storage_key::{balance_key, minted_balance_key}; use namada_trans_token::{read_denom, Amount, DenominatedAmount, Denomination}; @@ -17,19 +16,19 @@ use crate::storage_key::{ masp_last_locked_amount_key, masp_locked_amount_target_key, masp_max_reward_rate_key, }; +use crate::WithConversionState; /// Compute the precision of MASP rewards for the given token. This function /// must be a non-zero constant for a given token. -pub fn calculate_masp_rewards_precision( - wl_storage: &mut WlStorage, +pub fn calculate_masp_rewards_precision( + storage: &mut S, addr: &Address, ) -> namada_storage::Result<(u128, Denomination)> where - D: 'static + DB + for<'iter> DBIter<'iter>, - H: 'static + StorageHasher, + S: StorageWrite + StorageRead, { - let denomination = read_denom(wl_storage, addr)? - .expect("failed to read token denomination"); + let denomination = + read_denom(storage, addr)?.expect("failed to read token denomination"); // Inflation is implicitly denominated by this value. The lower this // figure, the less precise inflation computations are. This is especially // problematic when inflation is coming from a token with much higher @@ -45,58 +44,58 @@ where /// Compute the MASP rewards by applying the PD-controller to the genesis /// parameters and the last inflation and last locked rewards ratio values. 
-pub fn calculate_masp_rewards( - wl_storage: &mut WlStorage, +pub fn calculate_masp_rewards( + storage: &mut S, token: &Address, ) -> namada_storage::Result<((u128, u128), Denomination)> where - D: 'static + DB + for<'iter> DBIter<'iter>, - H: 'static + StorageHasher, + S: StorageWrite + StorageRead, { let (precision, denomination) = - calculate_masp_rewards_precision(wl_storage, token)?; + calculate_masp_rewards_precision(storage, token)?; let masp_addr = MASP; // Query the storage for information ------------------------- + let native_token = storage.get_native_token()?; //// information about the amount of native tokens on the chain - let total_native_tokens: Amount = wl_storage - .read(&minted_balance_key(&wl_storage.storage.native_token))? + let total_native_tokens: Amount = storage + .read(&minted_balance_key(&native_token))? .expect("the total supply key should be here"); // total locked amount in the Shielded pool - let total_tokens_in_masp: Amount = wl_storage + let total_tokens_in_masp: Amount = storage .read(&balance_key(token, &masp_addr))? .unwrap_or_default(); - let epochs_per_year: u64 = wl_storage + let epochs_per_year: u64 = storage .read(¶meters::storage::get_epochs_per_year_key())? .expect("epochs per year should properly decode"); //// Values from the last epoch - let last_inflation: Amount = wl_storage + let last_inflation: Amount = storage .read(&masp_last_inflation_key(token))? .expect("failure to read last inflation"); - let last_locked_amount: Amount = wl_storage + let last_locked_amount: Amount = storage .read(&masp_last_locked_amount_key(token))? .expect("failure to read last inflation"); //// Parameters for each token - let max_reward_rate: Dec = wl_storage + let max_reward_rate: Dec = storage .read(&masp_max_reward_rate_key(token))? .expect("max reward should properly decode"); - let kp_gain_nom: Dec = wl_storage + let kp_gain_nom: Dec = storage .read(&masp_kp_gain_key(token))? 
.expect("kp_gain_nom reward should properly decode"); - let kd_gain_nom: Dec = wl_storage + let kd_gain_nom: Dec = storage .read(&masp_kd_gain_key(token))? .expect("kd_gain_nom reward should properly decode"); - let target_locked_amount: Amount = wl_storage + let target_locked_amount: Amount = storage .read(&masp_locked_amount_target_key(token))? .expect("locked ratio target should properly decode"); @@ -173,23 +172,32 @@ where // but we should make sure the return value's ratio matches // this new inflation rate in 'update_allowed_conversions', // otherwise we will have an inaccurate view of inflation - wl_storage.write(&masp_last_inflation_key(token), inflation_amount)?; + storage.write(&masp_last_inflation_key(token), inflation_amount)?; - wl_storage - .write(&masp_last_locked_amount_key(token), total_tokens_in_masp)?; + storage.write(&masp_last_locked_amount_key(token), total_tokens_in_masp)?; Ok(((noterized_inflation, precision), denomination)) } // This is only enabled when "wasm-runtime" is on, because we're using rayon +#[cfg(not(any(feature = "multicore", test)))] +/// Update the MASP's allowed conversions +pub fn update_allowed_conversions( + _storage: &mut S, +) -> namada_storage::Result<()> +where + S: StorageWrite + StorageRead + WithConversionState, +{ + Ok(()) +} + #[cfg(any(feature = "multicore", test))] /// Update the MASP's allowed conversions -pub fn update_allowed_conversions( - wl_storage: &mut WlStorage, +pub fn update_allowed_conversions( + storage: &mut S, ) -> namada_storage::Result<()> where - D: 'static + DB + for<'iter> DBIter<'iter>, - H: 'static + StorageHasher, + S: StorageWrite + StorageRead + WithConversionState, { use std::cmp::Ordering; use std::collections::BTreeMap; @@ -200,8 +208,8 @@ where use masp_primitives::merkle_tree::FrozenCommitmentTree; use masp_primitives::sapling::Node; use masp_primitives::transaction::components::I128Sum as MaspAmount; - use namada_core::types::masp::encode_asset_type; - use 
namada_core::types::storage::Epoch; + use namada_core::masp::encode_asset_type; + use namada_core::storage::Epoch; use namada_storage::ResultExt; use namada_trans_token::{MaspDigitPos, NATIVE_MAX_DECIMAL_PLACES}; use rayon::iter::{ @@ -212,9 +220,8 @@ where // The derived conversions will be placed in MASP address space let masp_addr = MASP; - let mut masp_reward_keys: Vec<_> = wl_storage - .storage - .conversion_state + let mut masp_reward_keys: Vec<_> = storage + .conversion_state() .tokens .values() .cloned() @@ -222,7 +229,7 @@ where let mut masp_reward_denoms = BTreeMap::new(); // Put the native rewards first because other inflation computations depend // on it - let native_token = wl_storage.storage.native_token.clone(); + let native_token = storage.get_native_token()?; masp_reward_keys.sort_unstable_by(|x, y| { if (*x == native_token) == (*y == native_token) { Ordering::Equal @@ -276,20 +283,24 @@ where >::new(); // Native token inflation values are always with respect to this let ref_inflation = - calculate_masp_rewards_precision(wl_storage, &native_token)?.0; + calculate_masp_rewards_precision(storage, &native_token)?.0; // Reward all tokens according to above reward rates + let epoch = storage.get_block_epoch()?; + if epoch == Epoch::default() { + return Ok(()); + } + let prev_epoch = epoch.prev(); for token in &masp_reward_keys { - let (reward, denom) = calculate_masp_rewards(wl_storage, token)?; + let (reward, denom) = calculate_masp_rewards(storage, token)?; masp_reward_denoms.insert(token.clone(), denom); // Dispense a transparent reward in parallel to the shielded rewards - let addr_bal: Amount = wl_storage + let addr_bal: Amount = storage .read(&balance_key(token, &masp_addr))? 
.unwrap_or_default(); // Get the last rewarded amount of the native token - let normed_inflation = wl_storage - .storage - .conversion_state + let normed_inflation = *storage + .conversion_state_mut() .normed_inflation .get_or_insert(ref_inflation); @@ -301,23 +312,19 @@ where token.clone(), denom, digit, - Some(wl_storage.storage.last_epoch), - ) - .into_storage_result()?; - let new_asset = encode_asset_type( - token.clone(), - denom, - digit, - Some(wl_storage.storage.block.epoch), + Some(prev_epoch), ) .into_storage_result()?; + let new_asset = + encode_asset_type(token.clone(), denom, digit, Some(epoch)) + .into_storage_result()?; if *token == native_token { // The amount that will be given of the new native token for // every amount of the native token given in the // previous epoch - let new_normed_inflation = Uint::from(*normed_inflation) + let new_normed_inflation = Uint::from(normed_inflation) .checked_add( - (Uint::from(*normed_inflation) * Uint::from(reward.0)) + (Uint::from(normed_inflation) * Uint::from(reward.0)) / reward.1, ) .and_then(|x| x.try_into().ok()) @@ -328,7 +335,7 @@ where inflation parameters.", token ); - *normed_inflation + normed_inflation }); // The conversion is computed such that if consecutive // conversions are added together, the @@ -338,7 +345,7 @@ where (token.clone(), denom, digit), (MaspAmount::from_pair( old_asset, - -(*normed_inflation as i128), + -(normed_inflation as i128), ) .unwrap() + MaspAmount::from_pair( @@ -353,7 +360,7 @@ where // The reward for each reward.1 units of the current asset // is reward.0 units of the reward token let native_reward = - addr_bal * (new_normed_inflation, *normed_inflation); + addr_bal * (new_normed_inflation, normed_inflation); total_reward += native_reward .0 .checked_add(native_reward.1) @@ -361,7 +368,11 @@ where .checked_sub(addr_bal) .unwrap_or_default(); // Save the new normed inflation - *normed_inflation = new_normed_inflation; + + let _ = storage + .conversion_state_mut() + 
.normed_inflation + .insert(new_normed_inflation); } } else { // Express the inflation reward in real terms, that is, with @@ -369,7 +380,7 @@ where // epoch let real_reward = ((Uint::from(reward.0) * Uint::from(ref_inflation)) - / *normed_inflation) + / normed_inflation) .try_into() .unwrap_or_else(|_| { tracing::warn!( @@ -404,11 +415,11 @@ where } } // Add a conversion from the previous asset type - wl_storage.storage.conversion_state.assets.insert( + storage.conversion_state_mut().assets.insert( old_asset, ( (token.clone(), denom, digit), - wl_storage.storage.last_epoch, + prev_epoch, MaspAmount::zero().into(), 0, ), @@ -420,9 +431,8 @@ where // multiple cores let num_threads = rayon::current_num_threads(); // Put assets into vector to enable computation batching - let assets: Vec<_> = wl_storage - .storage - .conversion_state + let assets: Vec<_> = storage + .conversion_state_mut() .assets .values_mut() .enumerate() @@ -452,9 +462,9 @@ where // Update the MASP's transparent reward token balance to ensure that it // is sufficiently backed to redeem rewards let reward_key = balance_key(&native_token, &masp_addr); - let addr_bal: Amount = wl_storage.read(&reward_key)?.unwrap_or_default(); + let addr_bal: Amount = storage.read(&reward_key)?.unwrap_or_default(); let new_bal = addr_bal + total_reward; - wl_storage.write(&reward_key, new_bal)?; + storage.write(&reward_key, new_bal)?; // Try to distribute Merkle tree construction as evenly as possible // across multiple cores // Merkle trees must have exactly 2^n leaves to be mergeable @@ -470,16 +480,14 @@ where // Convert conversion vector into tree so that Merkle paths can be // obtained - wl_storage.storage.conversion_state.tree = + storage.conversion_state_mut().tree = FrozenCommitmentTree::merge(&tree_parts); // Update the anchor in storage - wl_storage.write( + storage.write( &crate::storage_key::masp_convert_anchor_key(), - namada_core::types::hash::Hash( - bls12_381::Scalar::from( - 
wl_storage.storage.conversion_state.tree.root(), - ) - .to_bytes(), + namada_core::hash::Hash( + bls12_381::Scalar::from(storage.conversion_state().tree.root()) + .to_bytes(), ), )?; @@ -494,20 +502,17 @@ where for digit in MaspDigitPos::iter() { // Add the decoding entry for the new asset type. An uncommitted // node position is used since this is not a conversion. - let new_asset = encode_asset_type( - addr.clone(), - denom, - digit, - Some(wl_storage.storage.block.epoch), - ) - .into_storage_result()?; - wl_storage.storage.conversion_state.assets.insert( + let new_asset = + encode_asset_type(addr.clone(), denom, digit, Some(epoch)) + .into_storage_result()?; + let tree_size = storage.conversion_state().tree.size(); + storage.conversion_state_mut().assets.insert( new_asset, ( (addr.clone(), denom, digit), - wl_storage.storage.block.epoch, + epoch, MaspAmount::zero().into(), - wl_storage.storage.conversion_state.tree.size(), + tree_size, ), ); } @@ -521,18 +526,19 @@ mod tests { use std::collections::HashMap; use std::str::FromStr; - use namada_core::types::address; - use namada_core::types::dec::testing::arb_non_negative_dec; - use namada_core::types::time::DurationSecs; - use namada_core::types::token::testing::arb_amount; + use namada_core::address; + use namada_core::dec::testing::arb_non_negative_dec; + use namada_core::time::DurationSecs; + use namada_core::token::testing::arb_amount; use namada_parameters::{EpochDuration, Parameters}; - use namada_state::testing::TestWlStorage; - use namada_trans_token::{write_denom, Denomination, MaspParams}; + use namada_storage::testing::TestStorage; + use namada_trans_token::write_denom; use proptest::prelude::*; use proptest::test_runner::Config; use test_log::test; use super::*; + use crate::ShieldedParams; proptest! 
{ #![proptest_config(Config { @@ -554,7 +560,7 @@ mod tests { ) { const ROUNDS: usize = 10; - let mut s = TestWlStorage::default(); + let mut s = TestStorage::default(); let params = Parameters { max_tx_bytes: 1024 * 1024, epoch_duration: EpochDuration { @@ -582,7 +588,7 @@ mod tests { namada_parameters::init_storage(¶ms, &mut s).unwrap(); // Tokens - let token_params = MaspParams { + let token_params = ShieldedParams { max_reward_rate: Dec::from_str("0.1").unwrap(), kp_gain_nom: Dec::from_str("0.1").unwrap(), kd_gain_nom: Dec::from_str("0.1").unwrap(), @@ -609,8 +615,7 @@ mod tests { .unwrap(); // Insert tokens into MASP conversion state - s.storage - .conversion_state + s.conversion_state_mut() .tokens .insert(alias.to_string(), token_addr.clone()); } @@ -626,13 +631,13 @@ mod tests { pub fn tokens() -> HashMap { vec![ - (address::nam(), ("nam", 6.into())), - (address::btc(), ("btc", 8.into())), - (address::eth(), ("eth", 18.into())), - (address::dot(), ("dot", 10.into())), - (address::schnitzel(), ("schnitzel", 6.into())), - (address::apfel(), ("apfel", 6.into())), - (address::kartoffel(), ("kartoffel", 6.into())), + (address::testing::nam(), ("nam", 6.into())), + (address::testing::btc(), ("btc", 8.into())), + (address::testing::eth(), ("eth", 18.into())), + (address::testing::dot(), ("dot", 10.into())), + (address::testing::schnitzel(), ("schnitzel", 6.into())), + (address::testing::apfel(), ("apfel", 6.into())), + (address::testing::kartoffel(), ("kartoffel", 6.into())), ] .into_iter() .collect() diff --git a/crates/shielded_token/src/lib.rs b/crates/shielded_token/src/lib.rs index 6e747b14da..08c70719b8 100644 --- a/crates/shielded_token/src/lib.rs +++ b/crates/shielded_token/src/lib.rs @@ -5,4 +5,50 @@ mod storage; pub mod storage_key; pub mod utils; +use std::str::FromStr; + +use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use namada_core::dec::Dec; +pub use namada_storage::conversion_state::{ + ConversionState, 
WithConversionState, +}; +use serde::{Deserialize, Serialize}; pub use storage::*; + +/// Token parameters for each kind of asset held on chain +#[derive( + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + BorshSerialize, + BorshDeserialize, + BorshSchema, + Deserialize, + Serialize, +)] +pub struct ShieldedParams { + /// Maximum reward rate + pub max_reward_rate: Dec, + /// Shielded Pool nominal derivative gain + pub kd_gain_nom: Dec, + /// Shielded Pool nominal proportional gain for the given token + pub kp_gain_nom: Dec, + /// Target amount for the given token that is locked in the shielded pool + /// TODO: should this be a Uint or DenominatedAmount??? + pub locked_amount_target: u64, +} + +impl Default for ShieldedParams { + fn default() -> Self { + Self { + max_reward_rate: Dec::from_str("0.1").unwrap(), + kp_gain_nom: Dec::from_str("0.25").unwrap(), + kd_gain_nom: Dec::from_str("0.25").unwrap(), + locked_amount_target: 10_000_u64, + } + } +} diff --git a/crates/shielded_token/src/storage.rs b/crates/shielded_token/src/storage.rs index 4fe2127aec..0c975296ab 100644 --- a/crates/shielded_token/src/storage.rs +++ b/crates/shielded_token/src/storage.rs @@ -1,16 +1,17 @@ -use namada_core::types::address::Address; -use namada_core::types::token; -use namada_core::types::token::Amount; -use namada_core::types::uint::Uint; +use namada_core::address::Address; +use namada_core::token; +use namada_core::token::Amount; +use namada_core::uint::Uint; use namada_storage as storage; use namada_storage::{StorageRead, StorageWrite}; use storage::ResultExt; use crate::storage_key::*; +use crate::ShieldedParams; /// Initialize parameters for the token in storage during the genesis block. 
pub fn write_params( - params: &token::MaspParams, + params: &ShieldedParams, storage: &mut S, address: &Address, denom: &token::Denomination, @@ -18,7 +19,7 @@ pub fn write_params( where S: StorageRead + StorageWrite, { - let token::MaspParams { + let ShieldedParams { max_reward_rate: max_rate, kd_gain_nom, kp_gain_nom, diff --git a/crates/shielded_token/src/storage_key.rs b/crates/shielded_token/src/storage_key.rs index e58ffe93a0..832a8ac9dc 100644 --- a/crates/shielded_token/src/storage_key.rs +++ b/crates/shielded_token/src/storage_key.rs @@ -2,9 +2,9 @@ use masp_primitives::bls12_381::Scalar; use masp_primitives::sapling::Nullifier; -use namada_core::types::address::{self, Address}; -use namada_core::types::hash::Hash; -use namada_core::types::storage::{self, DbKeySeg, KeySeg}; +use namada_core::address::{self, Address}; +use namada_core::hash::Hash; +use namada_core::storage::{self, DbKeySeg, KeySeg}; use namada_trans_token::storage_key::parameter_prefix; /// Key segment prefix for pinned shielded transactions diff --git a/crates/shielded_token/src/utils.rs b/crates/shielded_token/src/utils.rs index 4dfbaa9e89..42fc6413dd 100644 --- a/crates/shielded_token/src/utils.rs +++ b/crates/shielded_token/src/utils.rs @@ -3,7 +3,7 @@ use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; use masp_primitives::transaction::Transaction; -use namada_core::types::storage::IndexedTx; +use namada_core::storage::IndexedTx; use namada_storage::{Error, Result, StorageRead, StorageWrite}; use crate::storage_key::{ diff --git a/crates/state/Cargo.toml b/crates/state/Cargo.toml index 48460ae64f..df16bc3d7e 100644 --- a/crates/state/Cargo.toml +++ b/crates/state/Cargo.toml @@ -23,6 +23,7 @@ namada_core = { path = "../core", default-features = false } namada_gas = { path = "../gas" } namada_merkle_tree = { path = "../merkle_tree" } namada_parameters = { path = "../parameters" } +namada_replay_protection = { path = "../replay_protection" } 
namada_storage = { path = "../storage" } namada_trans_token = { path = "../trans_token" } namada_tx = { path = "../tx" } diff --git a/crates/state/src/host_env.rs b/crates/state/src/host_env.rs new file mode 100644 index 0000000000..2b2a1ef818 --- /dev/null +++ b/crates/state/src/host_env.rs @@ -0,0 +1,127 @@ +use std::cell::RefCell; + +use namada_core::validity_predicate::VpSentinel; +use namada_gas::{GasMetering, TxGasMeter, VpGasMeter}; +use namada_tx::data::TxSentinel; + +use crate::in_memory::InMemory; +use crate::write_log::WriteLog; +use crate::{DBIter, Error, Result, State, StateRead, StorageHasher, DB}; + +// State with mutable write log and gas metering for tx host env. +#[derive(Debug)] +pub struct TxHostEnvState<'a, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Write log + pub write_log: &'a mut WriteLog, + // DB + pub db: &'a D, + /// State + pub in_mem: &'a InMemory, + /// Tx gas meter + pub gas_meter: &'a RefCell, + /// Errors sentinel + pub sentinel: &'a RefCell, +} + +// Read-only state with gas metering for VP host env. 
+#[derive(Debug)] +pub struct VpHostEnvState<'a, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Write log + pub write_log: &'a WriteLog, + // DB + pub db: &'a D, + /// State + pub in_mem: &'a InMemory, + /// VP gas meter + pub gas_meter: &'a RefCell, + /// Errors sentinel + pub sentinel: &'a RefCell, +} + +impl StateRead for TxHostEnvState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + type D = D; + type H = H; + + fn write_log(&self) -> &WriteLog { + self.write_log + } + + fn db(&self) -> &D { + self.db + } + + fn in_mem(&self) -> &InMemory { + self.in_mem + } + + fn charge_gas(&self, gas: u64) -> Result<()> { + self.gas_meter.borrow_mut().consume(gas).map_err(|err| { + self.sentinel.borrow_mut().set_out_of_gas(); + tracing::info!( + "Stopping transaction execution because of gas error: {}", + err + ); + Error::Gas(err) + }) + } +} + +impl State for TxHostEnvState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn write_log_mut(&mut self) -> &mut WriteLog { + self.write_log + } + + fn split_borrow( + &mut self, + ) -> (&mut WriteLog, &InMemory, &Self::D) { + (self.write_log, (self.in_mem), (self.db)) + } +} + +impl StateRead for VpHostEnvState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + type D = D; + type H = H; + + fn write_log(&self) -> &WriteLog { + self.write_log + } + + fn db(&self) -> &D { + self.db + } + + fn in_mem(&self) -> &InMemory { + self.in_mem + } + + fn charge_gas(&self, gas: u64) -> Result<()> { + self.gas_meter.borrow_mut().consume(gas).map_err(|err| { + self.sentinel.borrow_mut().set_out_of_gas(); + tracing::info!( + "Stopping VP execution because of gas error: {}", + err + ); + Error::Gas(err) + }) + } +} diff --git a/crates/state/src/in_memory.rs b/crates/state/src/in_memory.rs new file mode 100644 index 0000000000..2d53b92b7c --- /dev/null +++ 
b/crates/state/src/in_memory.rs @@ -0,0 +1,299 @@ +use namada_core::address::{Address, EstablishedAddressGen, InternalAddress}; +use namada_core::borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::chain::{ChainId, CHAIN_ID_LENGTH}; +use namada_core::time::DateTimeUtc; +use namada_core::{encode, ethereum_structs}; +use namada_gas::MEMORY_ACCESS_GAS_PER_BYTE; +use namada_merkle_tree::{MerkleRoot, MerkleTree}; +use namada_parameters::{EpochDuration, Parameters}; +use namada_storage::conversion_state::ConversionState; +use namada_storage::tx_queue::{ExpiredTxsQueue, TxQueue}; +use namada_storage::{ + BlockHash, BlockHeight, BlockResults, Epoch, Epochs, EthEventsQueue, + Header, Key, KeySeg, StorageHasher, TxIndex, BLOCK_HASH_LENGTH, + BLOCK_HEIGHT_LENGTH, EPOCH_TYPE_LENGTH, +}; + +use crate::{Error, Result}; + +/// The ledger's state +#[derive(Debug)] +pub struct InMemory +where + H: StorageHasher, +{ + /// The ID of the chain + pub chain_id: ChainId, + /// The address of the native token - this is not stored in DB, but read + /// from genesis + pub native_token: Address, + /// Block storage data + pub block: BlockStorage, + /// During `FinalizeBlock`, this is the header of the block that is + /// going to be committed. After a block is committed, this is reset to + /// `None` until the next `FinalizeBlock` phase is reached. + pub header: Option
, + /// The most recently committed block, if any. + pub last_block: Option, + /// The epoch of the most recently committed block. If it is `Epoch(0)`, + /// then no block may have been committed for this chain yet. + pub last_epoch: Epoch, + /// Minimum block height at which the next epoch may start + pub next_epoch_min_start_height: BlockHeight, + /// Minimum block time at which the next epoch may start + pub next_epoch_min_start_time: DateTimeUtc, + /// The current established address generator + pub address_gen: EstablishedAddressGen, + /// We delay the switch to a new epoch by the number of blocks set in here. + /// This is `Some` when minimum number of blocks has been created and + /// minimum time has passed since the beginning of the last epoch. + /// Once the value is `Some(0)`, we're ready to switch to a new epoch and + /// this is reset back to `None`. + pub update_epoch_blocks_delay: Option, + /// The shielded transaction index + pub tx_index: TxIndex, + /// The currently saved conversion state + pub conversion_state: ConversionState, + /// Wrapper txs to be decrypted in the next block proposal + pub tx_queue: TxQueue, + /// Queue of expired transactions that need to be retransmitted. + /// + /// These transactions do not need to be persisted, as they are + /// retransmitted at the **COMMIT** phase immediately following + /// the block when they were queued. + pub expired_txs_queue: ExpiredTxsQueue, + /// The latest block height on Ethereum processed, if + /// the bridge is enabled. + pub ethereum_height: Option, + /// The queue of Ethereum events to be processed in order. 
+ pub eth_events_queue: EthEventsQueue, + /// How many block heights in the past can the storage be queried + pub storage_read_past_height_limit: Option, +} + +/// Last committed block +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] +pub struct LastBlock { + /// Block height + pub height: BlockHeight, + /// Block hash + pub hash: BlockHash, + /// Block time + pub time: DateTimeUtc, +} + +/// The block storage data +#[derive(Debug)] +pub struct BlockStorage { + /// Merkle tree of all the other data in block storage + pub tree: MerkleTree, + /// During `FinalizeBlock`, this is updated to be the hash of the block + /// that is going to be committed. If it is `BlockHash::default()`, + /// then no `FinalizeBlock` stage has been reached yet. + pub hash: BlockHash, + /// From the start of `FinalizeBlock` until the end of `Commit`, this is + /// height of the block that is going to be committed. Otherwise, it is the + /// height of the most recently committed block, or `BlockHeight::sentinel` + /// (0) if no block has been committed yet. + pub height: BlockHeight, + /// From the start of `FinalizeBlock` until the end of `Commit`, this is + /// height of the block that is going to be committed. Otherwise it is the + /// epoch of the most recently committed block, or `Epoch(0)` if no block + /// has been committed yet. 
+ pub epoch: Epoch, + /// Results of applying transactions + pub results: BlockResults, + /// Predecessor block epochs + pub pred_epochs: Epochs, +} + +impl InMemory +where + H: StorageHasher, +{ + /// Create a new instance of the state + pub fn new( + chain_id: ChainId, + native_token: Address, + storage_read_past_height_limit: Option, + ) -> Self { + let block = BlockStorage { + tree: MerkleTree::default(), + hash: BlockHash::default(), + height: BlockHeight::default(), + epoch: Epoch::default(), + pred_epochs: Epochs::default(), + results: BlockResults::default(), + }; + InMemory:: { + chain_id, + block, + header: None, + last_block: None, + last_epoch: Epoch::default(), + next_epoch_min_start_height: BlockHeight::default(), + next_epoch_min_start_time: DateTimeUtc::now(), + address_gen: EstablishedAddressGen::new( + "Privacy is a function of liberty.", + ), + update_epoch_blocks_delay: None, + tx_index: TxIndex::default(), + conversion_state: ConversionState::default(), + tx_queue: TxQueue::default(), + expired_txs_queue: ExpiredTxsQueue::default(), + native_token, + ethereum_height: None, + eth_events_queue: EthEventsQueue::default(), + storage_read_past_height_limit, + } + } + + /// Returns the Merkle root hash and the height of the committed block. If + /// no block exists, returns None. + pub fn get_state(&self) -> Option<(MerkleRoot, u64)> { + if self.block.height.0 != 0 { + Some((self.block.tree.root(), self.block.height.0)) + } else { + None + } + } + + /// Find the root hash of the merkle tree + pub fn merkle_root(&self) -> MerkleRoot { + self.block.tree.root() + } + + /// Set the block header. + /// The header is not in the Merkle tree as it's tracked by Tendermint. + /// Hence, we don't update the tree when this is set. + pub fn set_header(&mut self, header: Header) -> Result<()> { + self.header = Some(header); + Ok(()) + } + + /// Block data is in the Merkle tree as it's tracked by Tendermint in the + /// block header. 
Hence, we don't update the tree when this is set. + pub fn begin_block( + &mut self, + hash: BlockHash, + height: BlockHeight, + ) -> Result<()> { + self.block.hash = hash; + self.block.height = height; + Ok(()) + } + + /// Get the chain ID as a raw string + pub fn get_chain_id(&self) -> (String, u64) { + ( + self.chain_id.to_string(), + CHAIN_ID_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, + ) + } + + /// Get the block height + pub fn get_block_height(&self) -> (BlockHeight, u64) { + ( + self.block.height, + BLOCK_HEIGHT_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, + ) + } + + /// Get the block hash + pub fn get_block_hash(&self) -> (BlockHash, u64) { + ( + self.block.hash.clone(), + BLOCK_HASH_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, + ) + } + + /// Get the current (yet to be committed) block epoch + pub fn get_current_epoch(&self) -> (Epoch, u64) { + ( + self.block.epoch, + EPOCH_TYPE_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, + ) + } + + /// Get the epoch of the last committed block + pub fn get_last_epoch(&self) -> (Epoch, u64) { + ( + self.last_epoch, + EPOCH_TYPE_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, + ) + } + + /// Initialize the first epoch. The first epoch begins at genesis time. 
+ pub fn init_genesis_epoch( + &mut self, + initial_height: BlockHeight, + genesis_time: DateTimeUtc, + parameters: &Parameters, + ) -> Result<()> { + let EpochDuration { + min_num_of_blocks, + min_duration, + } = parameters.epoch_duration; + self.next_epoch_min_start_height = initial_height + min_num_of_blocks; + self.next_epoch_min_start_time = genesis_time + min_duration; + self.block.pred_epochs = Epochs { + first_block_heights: vec![initial_height], + }; + self.update_epoch_in_merkle_tree() + } + + /// Get the current conversions + pub fn get_conversion_state(&self) -> &ConversionState { + &self.conversion_state + } + + /// Update the merkle tree with epoch data + pub fn update_epoch_in_merkle_tree(&mut self) -> Result<()> { + let key_prefix: Key = + Address::Internal(InternalAddress::PoS).to_db_key().into(); + + let key = key_prefix + .push(&"epoch_start_height".to_string()) + .map_err(Error::KeyError)?; + self.block + .tree + .update(&key, encode(&self.next_epoch_min_start_height))?; + + let key = key_prefix + .push(&"epoch_start_time".to_string()) + .map_err(Error::KeyError)?; + self.block + .tree + .update(&key, encode(&self.next_epoch_min_start_time))?; + + let key = key_prefix + .push(&"current_epoch".to_string()) + .map_err(Error::KeyError)?; + self.block.tree.update(&key, encode(&self.block.epoch))?; + + Ok(()) + } + + /// Get the height of the last committed block or 0 if no block has been + /// committed yet. The first block is at height 1. 
+ pub fn get_last_block_height(&self) -> BlockHeight { + self.last_block + .as_ref() + .map(|b| b.height) + .unwrap_or_default() + } + + /// Get the oldest epoch where we can read a value + pub fn get_oldest_epoch(&self) -> Epoch { + let oldest_height = match self.storage_read_past_height_limit { + Some(limit) if limit < self.get_last_block_height().0 => { + (self.get_last_block_height().0 - limit).into() + } + _ => BlockHeight(1), + }; + self.block + .pred_epochs + .get_epoch(oldest_height) + .unwrap_or_default() + } +} diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index 8274277158..afe728c4fa 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -1,45 +1,49 @@ //! Ledger's state storage with key-value backed store and a merkle tree -pub mod wl_storage; +mod host_env; +mod in_memory; +mod wl_state; pub mod write_log; -use core::fmt::Debug; -use std::cmp::Ordering; -use std::format; +use std::fmt::Debug; +use std::iter::Peekable; -use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; -use namada_core::tendermint::merkle::proof::ProofOps; -use namada_core::types::address::{ - Address, EstablishedAddressGen, InternalAddress, -}; -use namada_core::types::chain::{ChainId, CHAIN_ID_LENGTH}; -use namada_core::types::eth_bridge_pool::is_pending_transfer_key; -use namada_core::types::hash::{Error as HashError, Hash}; -pub use namada_core::types::hash::{Sha256Hasher, StorageHasher}; -pub use namada_core::types::storage::{ +pub use host_env::{TxHostEnvState, VpHostEnvState}; +pub use in_memory::{BlockStorage, InMemory, LastBlock}; +use namada_core::address::Address; +use namada_core::eth_bridge_pool::is_pending_transfer_key; +pub use namada_core::hash::Sha256Hasher; +use namada_core::hash::{Error as HashError, Hash}; +pub use namada_core::storage::{ BlockHash, BlockHeight, BlockResults, Epoch, Epochs, EthEventsQueue, Header, Key, KeySeg, TxIndex, BLOCK_HASH_LENGTH, BLOCK_HEIGHT_LENGTH, EPOCH_TYPE_LENGTH, }; -use 
namada_core::types::time::DateTimeUtc; -pub use namada_core::types::token::ConversionState; -use namada_core::types::{encode, ethereum_structs, storage}; +use namada_core::tendermint::merkle::proof::ProofOps; use namada_gas::{ MEMORY_ACCESS_GAS_PER_BYTE, STORAGE_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE, }; +use namada_merkle_tree::Error as MerkleTreeError; pub use namada_merkle_tree::{ self as merkle_tree, ics23_specs, MembershipProof, MerkleTree, MerkleTreeStoresRead, MerkleTreeStoresWrite, StoreRef, StoreType, }; -use namada_merkle_tree::{Error as MerkleTreeError, MerkleRoot}; -use namada_parameters::{self, EpochDuration, Parameters}; -pub use namada_storage::{Error as StorageError, Result as StorageResult, *}; -use thiserror::Error; -use tx_queue::{ExpiredTxsQueue, TxQueue}; -pub use wl_storage::{ - iter_prefix_post, iter_prefix_pre, PrefixIter, TempWlStorage, WlStorage, +pub use namada_storage as storage; +pub use namada_storage::conversion_state::{ + ConversionState, WithConversionState, +}; +pub use namada_storage::types::{KVBytes, PrefixIterator}; +pub use namada_storage::{ + collections, iter_prefix, iter_prefix_bytes, iter_prefix_with_filter, + mockdb, tx_queue, BlockStateRead, BlockStateWrite, DBIter, DBWriteBatch, + DbError, DbResult, Error as StorageError, OptionExt, + Result as StorageResult, ResultExt, StorageHasher, StorageRead, + StorageWrite, DB, }; +use thiserror::Error; +pub use wl_state::{FullAccessState, TempWlState, WlState}; +use write_log::WriteLog; /// A result of a function that may fail pub type Result = std::result::Result; @@ -48,314 +52,38 @@ pub type Result = std::result::Result; /// it has 2 blocks delay on validator set update. 
pub const EPOCH_SWITCH_BLOCKS_DELAY: u32 = 2; -/// The ledger's state -#[derive(Debug)] -pub struct State -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - /// The database for the storage - pub db: D, - /// The ID of the chain - pub chain_id: ChainId, - /// The address of the native token - this is not stored in DB, but read - /// from genesis - pub native_token: Address, - /// Block storage data - pub block: BlockStorage, - /// During `FinalizeBlock`, this is the header of the block that is - /// going to be committed. After a block is committed, this is reset to - /// `None` until the next `FinalizeBlock` phase is reached. - pub header: Option
, - /// The most recently committed block, if any. - pub last_block: Option, - /// The epoch of the most recently committed block. If it is `Epoch(0)`, - /// then no block may have been committed for this chain yet. - pub last_epoch: Epoch, - /// Minimum block height at which the next epoch may start - pub next_epoch_min_start_height: BlockHeight, - /// Minimum block time at which the next epoch may start - pub next_epoch_min_start_time: DateTimeUtc, - /// The current established address generator - pub address_gen: EstablishedAddressGen, - /// We delay the switch to a new epoch by the number of blocks set in here. - /// This is `Some` when minimum number of blocks has been created and - /// minimum time has passed since the beginning of the last epoch. - /// Once the value is `Some(0)`, we're ready to switch to a new epoch and - /// this is reset back to `None`. - pub update_epoch_blocks_delay: Option, - /// The shielded transaction index - pub tx_index: TxIndex, - /// The currently saved conversion state - pub conversion_state: ConversionState, - /// Wrapper txs to be decrypted in the next block proposal - pub tx_queue: TxQueue, - /// Queue of expired transactions that need to be retransmitted. - /// - /// These transactions do not need to be persisted, as they are - /// retransmitted at the **COMMIT** phase immediately following - /// the block when they were queued. - pub expired_txs_queue: ExpiredTxsQueue, - /// The latest block height on Ethereum processed, if - /// the bridge is enabled. - pub ethereum_height: Option, - /// The queue of Ethereum events to be processed in order. 
- pub eth_events_queue: EthEventsQueue, - /// How many block heights in the past can the storage be queried - pub storage_read_past_height_limit: Option, - /// Static merkle tree storage key filter - pub merkle_tree_key_filter: fn(&storage::Key) -> bool, -} - -/// Last committed block -#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] -pub struct LastBlock { - /// Block height - pub height: BlockHeight, - /// Block hash - pub hash: BlockHash, - /// Block time - pub time: DateTimeUtc, -} +/// Common trait for read-only access to write log, DB and in-memory state. +pub trait StateRead: StorageRead + Debug { + /// DB type + type D: 'static + DB + for<'iter> DBIter<'iter>; + /// DB hasher type + type H: 'static + StorageHasher; -/// The block storage data -#[derive(Debug)] -pub struct BlockStorage { - /// Merkle tree of all the other data in block storage - pub tree: MerkleTree, - /// During `FinalizeBlock`, this is updated to be the hash of the block - /// that is going to be committed. If it is `BlockHash::default()`, - /// then no `FinalizeBlock` stage has been reached yet. - pub hash: BlockHash, - /// From the start of `FinalizeBlock` until the end of `Commit`, this is - /// height of the block that is going to be committed. Otherwise, it is the - /// height of the most recently committed block, or `BlockHeight::sentinel` - /// (0) if no block has been committed yet. - pub height: BlockHeight, - /// From the start of `FinalizeBlock` until the end of `Commit`, this is - /// height of the block that is going to be committed. Otherwise it is the - /// epoch of the most recently committed block, or `Epoch(0)` if no block - /// has been committed yet. 
- pub epoch: Epoch, - /// Results of applying transactions - pub results: BlockResults, - /// Predecessor block epochs - pub pred_epochs: Epochs, -} + /// Borrow `WriteLog` + fn write_log(&self) -> &WriteLog; -pub fn merklize_all_keys(_key: &storage::Key) -> bool { - true -} + /// Borrow `DB` + fn db(&self) -> &Self::D; -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("TEMPORARY error: {error}")] - Temporary { error: String }, - #[error("Found an unknown key: {key}")] - UnknownKey { key: String }, - #[error("Storage key error {0}")] - KeyError(namada_core::types::storage::Error), - #[error("Coding error: {0}")] - CodingError(#[from] namada_core::types::DecodeError), - #[error("Merkle tree error: {0}")] - MerkleTreeError(MerkleTreeError), - #[error("DB error: {0}")] - DBError(String), - #[error("Borsh (de)-serialization error: {0}")] - BorshCodingError(std::io::Error), - #[error("Merkle tree at the height {height} is not stored")] - NoMerkleTree { height: BlockHeight }, - #[error("Code hash error: {0}")] - InvalidCodeHash(HashError), - #[error("DB error: {0}")] - DbError(#[from] namada_storage::DbError), -} + /// Borrow `InMemory` state + fn in_mem(&self) -> &InMemory; -impl State -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - /// open up a new instance of the storage given path to db and chain id - pub fn open( - db_path: impl AsRef, - chain_id: ChainId, - native_token: Address, - cache: Option<&D::Cache>, - storage_read_past_height_limit: Option, - merkle_tree_key_filter: fn(&storage::Key) -> bool, - ) -> Self { - let block = BlockStorage { - tree: MerkleTree::default(), - hash: BlockHash::default(), - height: BlockHeight::default(), - epoch: Epoch::default(), - pred_epochs: Epochs::default(), - results: BlockResults::default(), - }; - State:: { - db: D::open(db_path, cache), - chain_id, - block, - header: None, - last_block: None, - last_epoch: Epoch::default(), - next_epoch_min_start_height: 
BlockHeight::default(), - next_epoch_min_start_time: DateTimeUtc::now(), - address_gen: EstablishedAddressGen::new( - "Privacy is a function of liberty.", - ), - update_epoch_blocks_delay: None, - tx_index: TxIndex::default(), - conversion_state: ConversionState::default(), - tx_queue: TxQueue::default(), - expired_txs_queue: ExpiredTxsQueue::default(), - native_token, - ethereum_height: None, - eth_events_queue: EthEventsQueue::default(), - storage_read_past_height_limit, - merkle_tree_key_filter, - } - } - - /// Load the full state at the last committed height, if any. Returns the - /// Merkle root hash and the height of the committed block. - pub fn load_last_state(&mut self) -> Result<()> { - if let Some(BlockStateRead { - merkle_tree_stores, - hash, - height, - time, - epoch, - pred_epochs, - next_epoch_min_start_height, - next_epoch_min_start_time, - update_epoch_blocks_delay, - results, - address_gen, - conversion_state, - tx_queue, - ethereum_height, - eth_events_queue, - }) = self.db.read_last_block()? - { - self.block.hash = hash.clone(); - self.block.height = height; - self.block.epoch = epoch; - self.block.results = results; - self.block.pred_epochs = pred_epochs; - self.last_block = Some(LastBlock { height, hash, time }); - self.last_epoch = epoch; - self.next_epoch_min_start_height = next_epoch_min_start_height; - self.next_epoch_min_start_time = next_epoch_min_start_time; - self.update_epoch_blocks_delay = update_epoch_blocks_delay; - self.address_gen = address_gen; - // Rebuild Merkle tree - self.block.tree = MerkleTree::new(merkle_tree_stores) - .or_else(|_| self.rebuild_full_merkle_tree(height))?; - self.conversion_state = conversion_state; - self.tx_queue = tx_queue; - self.ethereum_height = ethereum_height; - self.eth_events_queue = eth_events_queue; - tracing::debug!("Loaded storage from DB"); - } else { - tracing::info!("No state could be found"); - } - Ok(()) - } - - /// Returns the Merkle root hash and the height of the committed block. 
If - /// no block exists, returns None. - pub fn get_state(&self) -> Option<(MerkleRoot, u64)> { - if self.block.height.0 != 0 { - Some((self.block.tree.root(), self.block.height.0)) - } else { - None - } - } - - /// Persist the current block's state to the database - pub fn commit_block(&mut self, mut batch: D::WriteBatch) -> Result<()> { - // All states are written only when the first height or a new epoch - let is_full_commit = - self.block.height.0 == 1 || self.last_epoch != self.block.epoch; - - // For convenience in tests, fill-in a header if it's missing. - // Normally, the header is added in `FinalizeBlock`. - #[cfg(any(test, feature = "testing"))] - { - if self.header.is_none() { - self.header = Some(Header { - hash: Hash::default(), - time: DateTimeUtc::now(), - next_validators_hash: Hash::default(), - }); - } - } - - let state = BlockStateWrite { - merkle_tree_stores: self.block.tree.stores(), - header: self.header.as_ref(), - hash: &self.block.hash, - height: self.block.height, - time: self - .header - .as_ref() - .expect("Must have a block header on commit") - .time, - epoch: self.block.epoch, - results: &self.block.results, - pred_epochs: &self.block.pred_epochs, - next_epoch_min_start_height: self.next_epoch_min_start_height, - next_epoch_min_start_time: self.next_epoch_min_start_time, - update_epoch_blocks_delay: self.update_epoch_blocks_delay, - address_gen: &self.address_gen, - conversion_state: &self.conversion_state, - tx_queue: &self.tx_queue, - ethereum_height: self.ethereum_height.as_ref(), - eth_events_queue: &self.eth_events_queue, - }; - self.db - .add_block_to_batch(state, &mut batch, is_full_commit)?; - let header = self - .header - .take() - .expect("Must have a block header on commit"); - self.last_block = Some(LastBlock { - height: self.block.height, - hash: header.hash.into(), - time: header.time, - }); - self.last_epoch = self.block.epoch; - if is_full_commit { - // prune old merkle tree stores - self.prune_merkle_tree_stores(&mut 
batch)?; - } - self.db.exec_batch(batch)?; - Ok(()) - } - - /// Find the root hash of the merkle tree - pub fn merkle_root(&self) -> MerkleRoot { - self.block.tree.root() - } + fn charge_gas(&self, gas: u64) -> Result<()>; /// Check if the given key is present in storage. Returns the result and the /// gas cost. - pub fn has_key(&self, key: &Key) -> Result<(bool, u64)> { + fn db_has_key(&self, key: &storage::Key) -> Result<(bool, u64)> { Ok(( - self.db.read_subspace_val(key)?.is_some(), + self.db().read_subspace_val(key)?.is_some(), key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE, )) } /// Returns a value from the specified subspace and the gas cost - pub fn read(&self, key: &Key) -> Result<(Option>, u64)> { + fn db_read(&self, key: &storage::Key) -> Result<(Option>, u64)> { tracing::debug!("storage read key {}", key); - match self.db.read_subspace_val(key)? { + match self.db().read_subspace_val(key)? { Some(v) => { let gas = (key.len() + v.len()) as u64 * STORAGE_ACCESS_GAS_PER_BYTE; @@ -365,139 +93,29 @@ where } } - /// Returns a value from the specified subspace at the given height (or the - /// last committed height when 0) and the gas cost. - pub fn read_with_height( - &self, - key: &Key, - height: BlockHeight, - ) -> Result<(Option>, u64)> { - // `0` means last committed height - if height == BlockHeight(0) || height >= self.get_last_block_height() { - self.read(key) - } else { - if !(self.merkle_tree_key_filter)(key) { - return Ok((None, 0)); - } - - match self.db.read_subspace_val_with_height( - key, - height, - self.get_last_block_height(), - )? { - Some(v) => { - let gas = (key.len() + v.len()) as u64 - * STORAGE_ACCESS_GAS_PER_BYTE; - Ok((Some(v), gas)) - } - None => { - Ok((None, key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE)) - } - } - } - } - /// WARNING: This only works for values that have been committed to DB. /// To be able to see values written or deleted, but not yet committed, /// use the `StorageWithWriteLog`. 
/// /// Returns a prefix iterator, ordered by storage keys, and the gas cost. - pub fn iter_prefix( + fn db_iter_prefix( &self, prefix: &Key, - ) -> (>::PrefixIter, u64) { + ) -> (>::PrefixIter, u64) { ( - self.db.iter_prefix(Some(prefix)), + self.db().iter_prefix(Some(prefix)), prefix.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE, ) } /// Returns an iterator over the block results - pub fn iter_results(&self) -> (>::PrefixIter, u64) { - (self.db.iter_results(), 0) - } - - /// Write a value to the specified subspace and returns the gas cost and the - /// size difference - pub fn write( - &mut self, - key: &Key, - value: impl AsRef<[u8]>, - ) -> Result<(u64, i64)> { - // Note that this method is the same as `StorageWrite::write_bytes`, - // but with gas and storage bytes len diff accounting - tracing::debug!("storage write key {}", key,); - let value = value.as_ref(); - let is_key_merklized = (self.merkle_tree_key_filter)(key); - - if is_pending_transfer_key(key) { - // The tree of the bright pool stores the current height for the - // pending transfer - let height = self.block.height.serialize_to_vec(); - self.block.tree.update(key, height)?; - } else { - // Update the merkle tree - if is_key_merklized { - self.block.tree.update(key, value)?; - } - } - - let len = value.len(); - let gas = (key.len() + len) as u64 * STORAGE_WRITE_GAS_PER_BYTE; - let size_diff = self.db.write_subspace_val( - self.block.height, - key, - value, - is_key_merklized, - )?; - Ok((gas, size_diff)) - } - - /// Delete the specified subspace and returns the gas cost and the size - /// difference - pub fn delete(&mut self, key: &Key) -> Result<(u64, i64)> { - // Note that this method is the same as `StorageWrite::delete`, - // but with gas and storage bytes len diff accounting - let mut deleted_bytes_len = 0; - if self.has_key(key)?.0 { - let is_key_merklized = (self.merkle_tree_key_filter)(key); - if is_key_merklized { - self.block.tree.delete(key)?; - } - deleted_bytes_len = 
self.db.delete_subspace_val( - self.block.height, - key, - is_key_merklized, - )?; - } - let gas = (key.len() + deleted_bytes_len as usize) as u64 - * STORAGE_WRITE_GAS_PER_BYTE; - Ok((gas, deleted_bytes_len)) - } - - /// Set the block header. - /// The header is not in the Merkle tree as it's tracked by Tendermint. - /// Hence, we don't update the tree when this is set. - pub fn set_header(&mut self, header: Header) -> Result<()> { - self.header = Some(header); - Ok(()) - } - - /// Block data is in the Merkle tree as it's tracked by Tendermint in the - /// block header. Hence, we don't update the tree when this is set. - pub fn begin_block( - &mut self, - hash: BlockHash, - height: BlockHeight, - ) -> Result<()> { - self.block.hash = hash; - self.block.height = height; - Ok(()) + fn db_iter_results(&self) -> (>::PrefixIter, u64) { + (self.db().iter_results(), 0) } /// Get the hash of a validity predicate for the given account address and /// the gas cost for reading it. - pub fn validity_predicate( + fn validity_predicate( &self, addr: &Address, ) -> Result<(Option, u64)> { @@ -506,7 +124,7 @@ where } else { Key::validity_predicate(addr) }; - match self.read(&key)? { + match self.db_read(&key)? { (Some(value), gas) => { let vp_code_hash = Hash::try_from(&value[..]) .map_err(Error::InvalidCodeHash)?; @@ -516,311 +134,14 @@ where } } - #[allow(dead_code)] - /// Check if the given address exists on chain and return the gas cost. 
- pub fn exists(&self, addr: &Address) -> Result<(bool, u64)> { - let key = Key::validity_predicate(addr); - self.has_key(&key) - } - - /// Get the chain ID as a raw string - pub fn get_chain_id(&self) -> (String, u64) { - ( - self.chain_id.to_string(), - CHAIN_ID_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, - ) - } - - /// Get the block height - pub fn get_block_height(&self) -> (BlockHeight, u64) { - ( - self.block.height, - BLOCK_HEIGHT_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, - ) - } - - /// Get the block hash - pub fn get_block_hash(&self) -> (BlockHash, u64) { - ( - self.block.hash.clone(), - BLOCK_HASH_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, - ) - } - - /// Rebuild full Merkle tree after [`read_last_block()`] - fn rebuild_full_merkle_tree( - &self, - height: BlockHeight, - ) -> Result> { - self.get_merkle_tree(height, None) - } - - /// Rebuild Merkle tree with diffs in the DB. - /// Base tree and the specified `store_type` subtree is rebuilt. - /// If `store_type` isn't given, full Merkle tree is restored. - pub fn get_merkle_tree( - &self, - height: BlockHeight, - store_type: Option, - ) -> Result> { - // `0` means last committed height - let height = if height == BlockHeight(0) { - self.get_last_block_height() - } else { - height - }; - - let epoch = self - .block - .pred_epochs - .get_epoch(height) - .unwrap_or(Epoch::default()); - let epoch_start_height = - match self.block.pred_epochs.get_start_height_of_epoch(epoch) { - Some(height) if height == BlockHeight(0) => BlockHeight(1), - Some(height) => height, - None => BlockHeight(1), - }; - let stores = self - .db - .read_merkle_tree_stores(epoch, epoch_start_height, store_type)? 
- .ok_or(Error::NoMerkleTree { height })?; - let prefix = store_type.and_then(|st| st.provable_prefix()); - let mut tree = match store_type { - Some(_) => MerkleTree::::new_partial(stores), - None => MerkleTree::::new(stores).expect("invalid stores"), - }; - // Restore the tree state with diffs - let mut target_height = epoch_start_height; - while target_height < height { - target_height = target_height.next_height(); - let mut old_diff_iter = - self.db.iter_old_diffs(target_height, prefix.as_ref()); - let mut new_diff_iter = - self.db.iter_new_diffs(target_height, prefix.as_ref()); - - let mut old_diff = old_diff_iter.next(); - let mut new_diff = new_diff_iter.next(); - loop { - match (&old_diff, &new_diff) { - (Some(old), Some(new)) => { - let old_key = Key::parse(old.0.clone()) - .expect("the key should be parsable"); - let new_key = Key::parse(new.0.clone()) - .expect("the key should be parsable"); - - // compare keys as String - match old.0.cmp(&new.0) { - Ordering::Equal => { - // the value was updated - if (self.merkle_tree_key_filter)(&new_key) { - tree.update( - &new_key, - if is_pending_transfer_key(&new_key) { - target_height.serialize_to_vec() - } else { - new.1.clone() - }, - )?; - } - old_diff = old_diff_iter.next(); - new_diff = new_diff_iter.next(); - } - Ordering::Less => { - // the value was deleted - if (self.merkle_tree_key_filter)(&old_key) { - tree.delete(&old_key)?; - } - old_diff = old_diff_iter.next(); - } - Ordering::Greater => { - // the value was inserted - if (self.merkle_tree_key_filter)(&new_key) { - tree.update( - &new_key, - if is_pending_transfer_key(&new_key) { - target_height.serialize_to_vec() - } else { - new.1.clone() - }, - )?; - } - new_diff = new_diff_iter.next(); - } - } - } - (Some(old), None) => { - // the value was deleted - let key = Key::parse(old.0.clone()) - .expect("the key should be parsable"); - - if (self.merkle_tree_key_filter)(&key) { - tree.delete(&key)?; - } - - old_diff = old_diff_iter.next(); - } - (None, 
Some(new)) => { - // the value was inserted - let key = Key::parse(new.0.clone()) - .expect("the key should be parsable"); - - if (self.merkle_tree_key_filter)(&key) { - tree.update( - &key, - if is_pending_transfer_key(&key) { - target_height.serialize_to_vec() - } else { - new.1.clone() - }, - )?; - } - - new_diff = new_diff_iter.next(); - } - (None, None) => break, - } - } - } - if let Some(st) = store_type { - // Add the base tree with the given height - let mut stores = self - .db - .read_merkle_tree_stores(epoch, height, Some(StoreType::Base))? - .ok_or(Error::NoMerkleTree { height })?; - let restored_stores = tree.stores(); - // Set the root and store of the rebuilt subtree - stores.set_root(&st, *restored_stores.root(&st)); - stores.set_store(restored_stores.store(&st).to_owned()); - tree = MerkleTree::::new_partial(stores); - } - Ok(tree) - } - - /// Get a Tendermint-compatible existence proof. - /// - /// Proofs from the Ethereum bridge pool are not - /// Tendermint-compatible. Requesting for a key - /// belonging to the bridge pool will cause this - /// method to error. - pub fn get_existence_proof( - &self, - key: &Key, - value: namada_merkle_tree::StorageBytes, - height: BlockHeight, - ) -> Result { - use std::array; - - // `0` means last committed height - let height = if height == BlockHeight(0) { - self.get_last_block_height() - } else { - height - }; - - if height > self.get_last_block_height() { - if let MembershipProof::ICS23(proof) = self - .block - .tree - .get_sub_tree_existence_proof(array::from_ref(key), vec![value]) - .map_err(Error::MerkleTreeError)? 
- { - self.block - .tree - .get_sub_tree_proof(key, proof) - .map(Into::into) - .map_err(Error::MerkleTreeError) - } else { - Err(Error::MerkleTreeError(MerkleTreeError::TendermintProof)) - } - } else { - let (store_type, _) = StoreType::sub_key(key)?; - let tree = self.get_merkle_tree(height, Some(store_type))?; - if let MembershipProof::ICS23(proof) = tree - .get_sub_tree_existence_proof(array::from_ref(key), vec![value]) - .map_err(Error::MerkleTreeError)? - { - tree.get_sub_tree_proof(key, proof) - .map(Into::into) - .map_err(Error::MerkleTreeError) - } else { - Err(Error::MerkleTreeError(MerkleTreeError::TendermintProof)) - } - } - } - - /// Get the non-existence proof - pub fn get_non_existence_proof( - &self, - key: &Key, - height: BlockHeight, - ) -> Result { - // `0` means last committed height - let height = if height == BlockHeight(0) { - self.get_last_block_height() - } else { - height - }; - - if height > self.get_last_block_height() { - Err(Error::Temporary { - error: format!( - "The block at the height {} hasn't committed yet", - height, - ), - }) - } else { - let (store_type, _) = StoreType::sub_key(key)?; - self.get_merkle_tree(height, Some(store_type))? - .get_non_existence_proof(key) - .map(Into::into) - .map_err(Error::MerkleTreeError) - } - } - - /// Get the current (yet to be committed) block epoch - pub fn get_current_epoch(&self) -> (Epoch, u64) { - ( - self.block.epoch, - EPOCH_TYPE_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, - ) - } - - /// Get the epoch of the last committed block - pub fn get_last_epoch(&self) -> (Epoch, u64) { - ( - self.last_epoch, - EPOCH_TYPE_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, - ) - } - - /// Initialize the first epoch. The first epoch begins at genesis time. 
- pub fn init_genesis_epoch( - &mut self, - initial_height: BlockHeight, - genesis_time: DateTimeUtc, - parameters: &Parameters, - ) -> Result<()> { - let EpochDuration { - min_num_of_blocks, - min_duration, - } = parameters.epoch_duration; - self.next_epoch_min_start_height = initial_height + min_num_of_blocks; - self.next_epoch_min_start_time = genesis_time + min_duration; - self.block.pred_epochs = Epochs { - first_block_heights: vec![initial_height], - }; - self.update_epoch_in_merkle_tree() - } - /// Get the block header - pub fn get_block_header( + fn get_block_header( &self, height: Option, ) -> Result<(Option
, u64)> { match height { - Some(h) if h == self.get_block_height().0 => { - let header = self.header.clone(); + Some(h) if h == self.in_mem().get_block_height().0 => { + let header = self.in_mem().header.clone(); let gas = match header { Some(ref header) => { header.encoded_len() as u64 * MEMORY_ACCESS_GAS_PER_BYTE @@ -829,7 +150,7 @@ where }; Ok((header, gas)) } - Some(h) => match self.db.read_block_header(h)? { + Some(h) => match self.db().read_block_header(h)? { Some(header) => { let gas = header.encoded_len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE; @@ -837,260 +158,284 @@ where } None => Ok((None, STORAGE_ACCESS_GAS_PER_BYTE)), }, - None => Ok((self.header.clone(), STORAGE_ACCESS_GAS_PER_BYTE)), + None => { + Ok((self.in_mem().header.clone(), STORAGE_ACCESS_GAS_PER_BYTE)) + } } } +} - /// Get the timestamp of the last committed block, or the current timestamp - /// if no blocks have been produced yet - pub fn get_last_block_timestamp(&self) -> Result { - let last_block_height = self.get_block_height().0; +/// Common trait for write log, DB and in-memory state. +pub trait State: StateRead + StorageWrite { + /// Borrow mutable `WriteLog` + fn write_log_mut(&mut self) -> &mut WriteLog; - Ok(self - .db - .read_block_header(last_block_height)? - .map_or_else(DateTimeUtc::now, |header| header.time)) - } + /// Splitting borrow to get mutable reference to `WriteLog`, immutable + /// reference to the `InMemory` state and DB when in need of both (avoids + /// complain from the borrow checker) + fn split_borrow(&mut self) + -> (&mut WriteLog, &InMemory, &Self::D); - /// Get the current conversions - pub fn get_conversion_state(&self) -> &ConversionState { - &self.conversion_state + /// Write the provided tx hash to write log. 
+ fn write_tx_hash(&mut self, hash: Hash) -> write_log::Result<()> { + self.write_log_mut().write_tx_hash(hash) } +} - /// Update the merkle tree with epoch data - fn update_epoch_in_merkle_tree(&mut self) -> Result<()> { - let key_prefix: Key = - Address::Internal(InternalAddress::PoS).to_db_key().into(); - - let key = key_prefix - .push(&"epoch_start_height".to_string()) - .map_err(Error::KeyError)?; - self.block - .tree - .update(&key, encode(&self.next_epoch_min_start_height))?; - - let key = key_prefix - .push(&"epoch_start_time".to_string()) - .map_err(Error::KeyError)?; - self.block - .tree - .update(&key, encode(&self.next_epoch_min_start_time))?; - - let key = key_prefix - .push(&"current_epoch".to_string()) - .map_err(Error::KeyError)?; - self.block.tree.update(&key, encode(&self.block.epoch))?; - - Ok(()) - } +#[macro_export] +macro_rules! impl_storage_read { + ($($type:ty)*) => { + impl StorageRead for $($type)* + where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + { + type PrefixIter<'iter> = PrefixIter<'iter, D> where Self: 'iter; + + fn read_bytes( + &self, + key: &storage::Key, + ) -> namada_storage::Result>> { + // try to read from the write log first + let (log_val, gas) = self.write_log().read(key); + self.charge_gas(gas).into_storage_result()?; + match log_val { + Some(write_log::StorageModification::Write { ref value }) => { + Ok(Some(value.clone())) + } + Some(write_log::StorageModification::Delete) => Ok(None), + Some(write_log::StorageModification::InitAccount { + ref vp_code_hash, + }) => Ok(Some(vp_code_hash.to_vec())), + Some(write_log::StorageModification::Temp { ref value }) => { + Ok(Some(value.clone())) + } + None => { + // when not found in write log, try to read from the storage + let (value, gas) = self.db_read(key).into_storage_result()?; + self.charge_gas(gas).into_storage_result()?; + Ok(value) + } + } + } - /// Start write batch. 
- pub fn batch() -> D::WriteBatch { - D::batch() - } + fn has_key(&self, key: &storage::Key) -> namada_storage::Result { + // try to read from the write log first + let (log_val, gas) = self.write_log().read(key); + self.charge_gas(gas).into_storage_result()?; + match log_val { + Some(&write_log::StorageModification::Write { .. }) + | Some(&write_log::StorageModification::InitAccount { .. }) + | Some(&write_log::StorageModification::Temp { .. }) => Ok(true), + Some(&write_log::StorageModification::Delete) => { + // the given key has been deleted + Ok(false) + } + None => { + // when not found in write log, try to check the storage + let (present, gas) = self.db_has_key(key).into_storage_result()?; + self.charge_gas(gas).into_storage_result()?; + Ok(present) + } + } + } - /// Execute write batch. - pub fn exec_batch(&mut self, batch: D::WriteBatch) -> Result<()> { - Ok(self.db.exec_batch(batch)?) - } + fn iter_prefix<'iter>( + &'iter self, + prefix: &storage::Key, + ) -> namada_storage::Result> { + let (iter, gas) = + iter_prefix_post(self.write_log(), self.db(), prefix); + self.charge_gas(gas).into_storage_result()?; + Ok(iter) + } - /// Batch write the value with the given height and account subspace key to - /// the DB. Returns the size difference from previous value, if any, or - /// the size of the value otherwise. 
- pub fn batch_write_subspace_val( - &mut self, - batch: &mut D::WriteBatch, - key: &Key, - value: impl AsRef<[u8]>, - ) -> Result { - let value = value.as_ref(); - let is_key_merklized = (self.merkle_tree_key_filter)(key); - - if is_pending_transfer_key(key) { - // The tree of the bridge pool stores the current height for the - // pending transfer - let height = self.block.height.serialize_to_vec(); - self.block.tree.update(key, height)?; - } else { - // Update the merkle tree - if is_key_merklized { - self.block.tree.update(key, value)?; + fn iter_next<'iter>( + &'iter self, + iter: &mut Self::PrefixIter<'iter>, + ) -> namada_storage::Result)>> { + iter.next().map(|(key, val, gas)| { + self.charge_gas(gas).into_storage_result()?; + Ok((key, val)) + }).transpose() } - } - Ok(self.db.batch_write_subspace_val( - batch, - self.block.height, - key, - value, - is_key_merklized, - )?) - } - /// Batch delete the value with the given height and account subspace key - /// from the DB. Returns the size of the removed value, if any, 0 if no - /// previous value was found. - pub fn batch_delete_subspace_val( - &mut self, - batch: &mut D::WriteBatch, - key: &Key, - ) -> Result { - let is_key_merklized = (self.merkle_tree_key_filter)(key); - // Update the merkle tree - if is_key_merklized { - self.block.tree.delete(key)?; - } - Ok(self.db.batch_delete_subspace_val( - batch, - self.block.height, - key, - is_key_merklized, - )?) - } + fn get_chain_id( + &self, + ) -> std::result::Result { + let (chain_id, gas) = self.in_mem().get_chain_id(); + self.charge_gas(gas).into_storage_result()?; + Ok(chain_id) + } - // Prune merkle tree stores. Use after updating self.block.height in the - // commit. 
- fn prune_merkle_tree_stores( - &mut self, - batch: &mut D::WriteBatch, - ) -> Result<()> { - if self.block.epoch.0 == 0 { - return Ok(()); - } - // Prune non-provable stores at the previous epoch - for st in StoreType::iter_non_provable() { - self.db.prune_merkle_tree_store( - batch, - st, - self.block.epoch.prev(), - )?; - } - // Prune provable stores - let oldest_epoch = self.get_oldest_epoch(); - if oldest_epoch.0 > 0 { - // Remove stores at the previous epoch because the Merkle tree - // stores at the starting height of the epoch would be used to - // restore stores at a height (> oldest_height) in the epoch - for st in StoreType::iter_provable() { - self.db.prune_merkle_tree_store( - batch, - st, - oldest_epoch.prev(), - )?; + fn get_block_height( + &self, + ) -> std::result::Result { + let (height, gas) = self.in_mem().get_block_height(); + self.charge_gas(gas).into_storage_result()?; + Ok(height) } - // Prune the BridgePool subtree stores with invalid nonce - let mut epoch = match self.get_oldest_epoch_with_valid_nonce()? { - Some(epoch) => epoch, - None => return Ok(()), - }; - while oldest_epoch < epoch { - epoch = epoch.prev(); - self.db.prune_merkle_tree_store( - batch, - &StoreType::BridgePool, - epoch, - )?; + fn get_block_header( + &self, + height: storage::BlockHeight, + ) -> std::result::Result, namada_storage::Error> + { + let (header, gas) = + StateRead::get_block_header(self, Some(height)).into_storage_result()?; + self.charge_gas(gas).into_storage_result()?; + Ok(header) } - } - Ok(()) - } + fn get_block_hash( + &self, + ) -> std::result::Result { + let (hash, gas) = self.in_mem().get_block_hash(); + self.charge_gas(gas).into_storage_result()?; + Ok(hash) + } - /// Get the height of the last committed block or 0 if no block has been - /// committed yet. The first block is at height 1. 
- pub fn get_last_block_height(&self) -> BlockHeight { - self.last_block - .as_ref() - .map(|b| b.height) - .unwrap_or_default() - } + fn get_block_epoch( + &self, + ) -> std::result::Result { + let (epoch, gas) = self.in_mem().get_current_epoch(); + self.charge_gas(gas).into_storage_result()?; + Ok(epoch) + } - /// Get the oldest epoch where we can read a value - pub fn get_oldest_epoch(&self) -> Epoch { - let oldest_height = match self.storage_read_past_height_limit { - Some(limit) if limit < self.get_last_block_height().0 => { - (self.get_last_block_height().0 - limit).into() + fn get_pred_epochs(&self) -> namada_storage::Result { + self.charge_gas( + namada_gas::STORAGE_ACCESS_GAS_PER_BYTE, + ).into_storage_result()?; + Ok(self.in_mem().block.pred_epochs.clone()) } - _ => BlockHeight(1), - }; - self.block - .pred_epochs - .get_epoch(oldest_height) - .unwrap_or_default() + + fn get_tx_index( + &self, + ) -> std::result::Result { + self.charge_gas( + namada_gas::STORAGE_ACCESS_GAS_PER_BYTE, + ).into_storage_result()?; + Ok(self.in_mem().tx_index) + } + + fn get_native_token(&self) -> namada_storage::Result
{ + self.charge_gas( + namada_gas::STORAGE_ACCESS_GAS_PER_BYTE, + ).into_storage_result()?; + Ok(self.in_mem().native_token.clone()) + } + } } +} - /// Get oldest epoch which has the valid signed nonce of the bridge pool - fn get_oldest_epoch_with_valid_nonce(&self) -> Result> { - let last_height = self.get_last_block_height(); - let current_nonce = match self - .db - .read_bridge_pool_signed_nonce(last_height, last_height)? +#[macro_export] +macro_rules! impl_storage_write { + ($($type:ty)*) => { + impl StorageWrite for $($type)* + where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, { - Some(nonce) => nonce, - None => return Ok(None), - }; - let (mut epoch, _) = self.get_last_epoch(); - // We don't need to check the older epochs because their Merkle tree - // snapshots have been already removed - let oldest_epoch = self.get_oldest_epoch(); - // Look up the last valid epoch which has the previous nonce of the - // current one. It has the previous nonce, but it was - // incremented during the epoch. - while 0 < epoch.0 && oldest_epoch <= epoch { - epoch = epoch.prev(); - let height = - match self.block.pred_epochs.get_start_height_of_epoch(epoch) { - Some(h) => h, - None => continue, - }; - let nonce = match self - .db - .read_bridge_pool_signed_nonce(height, last_height)? 
- { - Some(nonce) => nonce, - // skip pruning when the old epoch doesn't have the signed nonce - None => break, - }; - if nonce < current_nonce { - break; + fn write_bytes( + &mut self, + key: &storage::Key, + val: impl AsRef<[u8]>, + ) -> namada_storage::Result<()> { + let (gas, _size_diff) = self + .write_log_mut() + .write(key, val.as_ref().to_vec()) + .into_storage_result()?; + self.charge_gas(gas).into_storage_result()?; + Ok(()) + } + + fn delete(&mut self, key: &storage::Key) -> namada_storage::Result<()> { + let (gas, _size_diff) = self + .write_log_mut() + .delete(key) + .into_storage_result()?; + self.charge_gas(gas).into_storage_result()?; + Ok(()) } } - Ok(Some(epoch)) - } + }; +} - /// Check it the given transaction's hash is already present in storage - pub fn has_replay_protection_entry(&self, hash: &Hash) -> Result { - Ok(self.db.has_replay_protection_entry(hash)?) - } +// Note: `FullAccessState` writes to a write-log at block-level, while all the +// other `StorageWrite` impls write at tx-level. +macro_rules! 
impl_storage_write_by_protocol { + ($($type:ty)*) => { + impl StorageWrite for $($type)* + where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + { + fn write_bytes( + &mut self, + key: &storage::Key, + val: impl AsRef<[u8]>, + ) -> namada_storage::Result<()> { + self + .write_log_mut() + .protocol_write(key, val.as_ref().to_vec()) + .into_storage_result()?; + Ok(()) + } - /// Write the provided tx hash to storage - pub fn write_replay_protection_entry( - &mut self, - batch: &mut D::WriteBatch, - key: &Key, - ) -> Result<()> { - self.db.write_replay_protection_entry(batch, key)?; - Ok(()) - } + fn delete(&mut self, key: &storage::Key) -> namada_storage::Result<()> { + self + .write_log_mut() + .protocol_delete(key) + .into_storage_result()?; + Ok(()) + } + } + }; +} - /// Delete the provided tx hash from storage - pub fn delete_replay_protection_entry( - &mut self, - batch: &mut D::WriteBatch, - key: &Key, - ) -> Result<()> { - self.db.delete_replay_protection_entry(batch, key)?; - Ok(()) - } +impl_storage_read!(FullAccessState); +impl_storage_read!(WlState); +impl_storage_read!(TempWlState<'_, D, H>); +impl_storage_write_by_protocol!(FullAccessState); +impl_storage_write_by_protocol!(WlState); +impl_storage_write_by_protocol!(TempWlState<'_, D, H>); - /// Iterate the replay protection storage from the last block - pub fn iter_replay_protection( - &self, - ) -> Box + '_> { - Box::new(self.db.iter_replay_protection().map(|(raw_key, _, _)| { - raw_key.parse().expect("Failed hash conversion") - })) - } +impl_storage_read!(TxHostEnvState<'_, D, H>); +impl_storage_read!(VpHostEnvState<'_, D, H>); +impl_storage_write!(TxHostEnvState<'_, D, H>); + +pub fn merklize_all_keys(_key: &storage::Key) -> bool { + true +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("TEMPORARY error: {error}")] + Temporary { error: String }, + #[error("Found an unknown key: {key}")] + UnknownKey { key: String }, + #[error("Storage key 
error {0}")] + KeyError(namada_core::storage::Error), + #[error("Coding error: {0}")] + CodingError(#[from] namada_core::DecodeError), + #[error("Merkle tree error: {0}")] + MerkleTreeError(MerkleTreeError), + #[error("DB error: {0}")] + DBError(String), + #[error("Borsh (de)-serialization error: {0}")] + BorshCodingError(std::io::Error), + #[error("Merkle tree at the height {height} is not stored")] + NoMerkleTree { height: BlockHeight }, + #[error("Code hash error: {0}")] + InvalidCodeHash(HashError), + #[error("DB error: {0}")] + DbError(#[from] namada_storage::DbError), + #[error("{0}")] + Gas(namada_gas::Error), + #[error("{0}")] + StorageError(#[from] namada_storage::Error), } impl From for Error { @@ -1099,26 +444,169 @@ impl From for Error { } } +/// Prefix iterator for [`StorageRead`] implementations. +#[derive(Debug)] +pub struct PrefixIter<'iter, D> +where + D: DB + DBIter<'iter>, +{ + /// Peekable storage iterator + pub storage_iter: Peekable<>::PrefixIter>, + /// Peekable write log iterator + pub write_log_iter: Peekable, +} + +/// Iterate write-log storage items prior to a tx execution, matching the +/// given prefix. Returns the iterator and gas cost. +pub fn iter_prefix_pre<'a, D>( + // We cannot use e.g. `&'a State`, because it doesn't live long + // enough - the lifetime of the `PrefixIter` must depend on the lifetime of + // references to the `WriteLog` and `DB`. + write_log: &'a WriteLog, + db: &'a D, + prefix: &storage::Key, +) -> (PrefixIter<'a, D>, u64) +where + D: DB + for<'iter> DBIter<'iter>, +{ + let storage_iter = db.iter_prefix(Some(prefix)).peekable(); + let write_log_iter = write_log.iter_prefix_pre(prefix).peekable(); + ( + PrefixIter:: { + storage_iter, + write_log_iter, + }, + prefix.len() as u64 * namada_gas::STORAGE_ACCESS_GAS_PER_BYTE, + ) +} + +/// Iterate write-log storage items posterior to a tx execution, matching the +/// given prefix. Returns the iterator and gas cost. 
+pub fn iter_prefix_post<'a, D>( + // We cannot use e.g. `&'a State`, because it doesn't live long + // enough - the lifetime of the `PrefixIter` must depend on the lifetime of + // references to the `WriteLog` and `DB`. + write_log: &'a WriteLog, + db: &'a D, + prefix: &storage::Key, +) -> (PrefixIter<'a, D>, u64) +where + D: DB + for<'iter> DBIter<'iter>, +{ + let storage_iter = db.iter_prefix(Some(prefix)).peekable(); + let write_log_iter = write_log.iter_prefix_post(prefix).peekable(); + ( + PrefixIter:: { + storage_iter, + write_log_iter, + }, + prefix.len() as u64 * namada_gas::STORAGE_ACCESS_GAS_PER_BYTE, + ) +} + +impl<'iter, D> Iterator for PrefixIter<'iter, D> +where + D: DB + DBIter<'iter>, +{ + type Item = (String, Vec, u64); + + fn next(&mut self) -> Option { + enum Next { + ReturnWl { advance_storage: bool }, + ReturnStorage, + } + loop { + let what: Next; + { + let storage_peeked = self.storage_iter.peek(); + let wl_peeked = self.write_log_iter.peek(); + match (storage_peeked, wl_peeked) { + (None, None) => return None, + (None, Some(_)) => { + what = Next::ReturnWl { + advance_storage: false, + }; + } + (Some(_), None) => { + what = Next::ReturnStorage; + } + (Some((storage_key, _, _)), Some((wl_key, _))) => { + if wl_key <= storage_key { + what = Next::ReturnWl { + advance_storage: wl_key == storage_key, + }; + } else { + what = Next::ReturnStorage; + } + } + } + } + match what { + Next::ReturnWl { advance_storage } => { + if advance_storage { + let _ = self.storage_iter.next(); + } + + if let Some((key, modification)) = + self.write_log_iter.next() + { + match modification { + write_log::StorageModification::Write { value } + | write_log::StorageModification::Temp { value } => { + let gas = value.len() as u64; + return Some((key, value, gas)); + } + write_log::StorageModification::InitAccount { + vp_code_hash, + } => { + let gas = vp_code_hash.len() as u64; + return Some((key, vp_code_hash.to_vec(), gas)); + } + 
write_log::StorageModification::Delete => { + continue; + } + } + } + } + Next::ReturnStorage => { + if let Some(next) = self.storage_iter.next() { + return Some(next); + } + } + } + } + } +} + /// Helpers for testing components that depend on storage #[cfg(any(test, feature = "testing"))] pub mod testing { - use namada_core::types::address; - use namada_core::types::hash::Sha256Hasher; + use namada_core::address; + use namada_core::address::EstablishedAddressGen; + use namada_core::chain::ChainId; + use namada_core::time::DateTimeUtc; + use namada_storage::tx_queue::{ExpiredTxsQueue, TxQueue}; use super::mockdb::MockDB; use super::*; - /// `WlStorage` with a mock DB for testing - pub type TestWlStorage = WlStorage; + pub type TestState = FullAccessState; - /// Storage with a mock DB for testing. - /// - /// Prefer to use [`TestWlStorage`], which implements - /// `namada_storageStorageRead + StorageWrite` with properly working - /// `prefix_iter`. - pub type TestStorage = State; + impl Default for TestState { + fn default() -> Self { + Self(WlState { + write_log: Default::default(), + db: MockDB::default(), + in_mem: Default::default(), + merkle_tree_key_filter: merklize_all_keys, + }) + } + } + + /// In memory State for testing. 
+ pub type InMemoryState = InMemory; - impl Default for TestStorage { + impl Default for InMemoryState { fn default() -> Self { let chain_id = ChainId::default(); let tree = MerkleTree::default(); @@ -1131,7 +619,6 @@ pub mod testing { results: BlockResults::default(), }; Self { - db: MockDB::default(), chain_id, block, header: None, @@ -1147,21 +634,10 @@ pub mod testing { conversion_state: ConversionState::default(), tx_queue: TxQueue::default(), expired_txs_queue: ExpiredTxsQueue::default(), - native_token: address::nam(), + native_token: address::testing::nam(), ethereum_height: None, eth_events_queue: EthEventsQueue::default(), storage_read_past_height_limit: Some(1000), - merkle_tree_key_filter: merklize_all_keys, - } - } - } - - #[allow(clippy::derivable_impls)] - impl Default for TestWlStorage { - fn default() -> Self { - Self { - write_log: Default::default(), - storage: Default::default(), } } } @@ -1172,12 +648,18 @@ mod tests { use std::collections::BTreeMap; use chrono::{TimeZone, Utc}; - use namada_core::types::dec::Dec; - use namada_core::types::time::{self, Duration}; - use namada_core::types::token; - use namada_parameters::Parameters; + use namada_core::address::InternalAddress; + use namada_core::borsh::{BorshDeserialize, BorshSerializeExt}; + use namada_core::dec::Dec; + use namada_core::storage::DbKeySeg; + use namada_core::time::{self, DateTimeUtc, Duration}; + use namada_core::token; + use namada_parameters::{EpochDuration, Parameters}; use proptest::prelude::*; use proptest::test_runner::Config; + // Use `RUST_LOG=info` (or another tracing level) and `--nocapture` to + // see `tracing` logs from tests + use test_log::test; use super::testing::*; use super::*; @@ -1239,17 +721,11 @@ mod tests { min_blocks_delta, min_duration_delta, max_time_per_block_delta) in arb_and_epoch_duration_start_and_block()) { - let mut wl_storage = - TestWlStorage { - storage: TestStorage { - next_epoch_min_start_height: - start_height + 
epoch_duration.min_num_of_blocks, - next_epoch_min_start_time: - start_time + epoch_duration.min_duration, - ..Default::default() - }, - ..Default::default() - }; + let mut state =TestState::default(); + state.in_mem_mut().next_epoch_min_start_height= + start_height + epoch_duration.min_num_of_blocks; + state.in_mem_mut().next_epoch_min_start_time= + start_time + epoch_duration.min_duration; let mut parameters = Parameters { max_tx_bytes: 1024 * 1024, max_proposal_bytes: Default::default(), @@ -1267,19 +743,20 @@ mod tests { fee_unshielding_descriptions_limit: 15, minimum_gas_price: BTreeMap::default(), }; - namada_parameters::init_storage(¶meters, &mut wl_storage).unwrap(); + namada_parameters::init_storage(¶meters, &mut state).unwrap(); // Initialize pred_epochs to the current height - wl_storage - .storage + let height = state.in_mem().block.height; + state + .in_mem_mut() .block .pred_epochs - .new_epoch(wl_storage.storage.block.height); + .new_epoch(height); - let epoch_before = wl_storage.storage.last_epoch; - assert_eq!(epoch_before, wl_storage.storage.block.epoch); + let epoch_before = state.in_mem().last_epoch; + assert_eq!(epoch_before, state.in_mem().block.epoch); // Try to apply the epoch update - wl_storage.update_epoch(block_height, block_time).unwrap(); + state.update_epoch(block_height, block_time).unwrap(); // Test for 1. 
if block_height.0 - start_height.0 @@ -1291,43 +768,43 @@ mod tests { ) { // Update will now be enqueued for 2 blocks in the future - assert_eq!(wl_storage.storage.block.epoch, epoch_before); - assert_eq!(wl_storage.storage.update_epoch_blocks_delay, Some(2)); + assert_eq!(state.in_mem().block.epoch, epoch_before); + assert_eq!(state.in_mem().update_epoch_blocks_delay, Some(2)); let block_height = block_height + 1; let block_time = block_time + Duration::seconds(1); - wl_storage.update_epoch(block_height, block_time).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before); - assert_eq!(wl_storage.storage.update_epoch_blocks_delay, Some(1)); + state.update_epoch(block_height, block_time).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before); + assert_eq!(state.in_mem().update_epoch_blocks_delay, Some(1)); let block_height = block_height + 1; let block_time = block_time + Duration::seconds(1); - wl_storage.update_epoch(block_height, block_time).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before.next()); - assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); + state.update_epoch(block_height, block_time).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before.next()); + assert!(state.in_mem().update_epoch_blocks_delay.is_none()); - assert_eq!(wl_storage.storage.next_epoch_min_start_height, + assert_eq!(state.in_mem().next_epoch_min_start_height, block_height + epoch_duration.min_num_of_blocks); - assert_eq!(wl_storage.storage.next_epoch_min_start_time, + assert_eq!(state.in_mem().next_epoch_min_start_time, block_time + epoch_duration.min_duration); assert_eq!( - wl_storage.storage.block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), + state.in_mem().block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), Some(epoch_before)); assert_eq!( - wl_storage.storage.block.pred_epochs.get_epoch(block_height), + state.in_mem().block.pred_epochs.get_epoch(block_height), Some(epoch_before.next())); } else 
{ - assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); - assert_eq!(wl_storage.storage.block.epoch, epoch_before); + assert!(state.in_mem().update_epoch_blocks_delay.is_none()); + assert_eq!(state.in_mem().block.epoch, epoch_before); assert_eq!( - wl_storage.storage.block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), + state.in_mem().block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), Some(epoch_before)); assert_eq!( - wl_storage.storage.block.pred_epochs.get_epoch(block_height), + state.in_mem().block.pred_epochs.get_epoch(block_height), Some(epoch_before)); } // Last epoch should only change when the block is committed - assert_eq!(wl_storage.storage.last_epoch, epoch_before); + assert_eq!(state.in_mem().last_epoch, epoch_before); // Update the epoch duration parameters parameters.epoch_duration.min_num_of_blocks = @@ -1337,57 +814,57 @@ mod tests { Duration::seconds(min_duration + min_duration_delta).into(); parameters.max_expected_time_per_block = Duration::seconds(max_expected_time_per_block + max_time_per_block_delta).into(); - namada_parameters::update_max_expected_time_per_block_parameter(&mut wl_storage, ¶meters.max_expected_time_per_block).unwrap(); - namada_parameters::update_epoch_parameter(&mut wl_storage, ¶meters.epoch_duration).unwrap(); + namada_parameters::update_max_expected_time_per_block_parameter(&mut state, ¶meters.max_expected_time_per_block).unwrap(); + namada_parameters::update_epoch_parameter(&mut state, ¶meters.epoch_duration).unwrap(); // Test for 2. 
- let epoch_before = wl_storage.storage.block.epoch; - let height_of_update = wl_storage.storage.next_epoch_min_start_height.0 ; - let time_of_update = wl_storage.storage.next_epoch_min_start_time; + let epoch_before = state.in_mem().block.epoch; + let height_of_update = state.in_mem().next_epoch_min_start_height.0 ; + let time_of_update = state.in_mem().next_epoch_min_start_time; let height_before_update = BlockHeight(height_of_update - 1); let height_of_update = BlockHeight(height_of_update); let time_before_update = time_of_update - Duration::seconds(1); // No update should happen before both epoch duration conditions are // satisfied - wl_storage.update_epoch(height_before_update, time_before_update).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before); - assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); - wl_storage.update_epoch(height_of_update, time_before_update).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before); - assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); - wl_storage.update_epoch(height_before_update, time_of_update).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before); - assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); + state.update_epoch(height_before_update, time_before_update).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before); + assert!(state.in_mem().update_epoch_blocks_delay.is_none()); + state.update_epoch(height_of_update, time_before_update).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before); + assert!(state.in_mem().update_epoch_blocks_delay.is_none()); + state.update_epoch(height_before_update, time_of_update).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before); + assert!(state.in_mem().update_epoch_blocks_delay.is_none()); // Update should be enqueued for 2 blocks in the future starting at or after this height and time - wl_storage.update_epoch(height_of_update, time_of_update).unwrap(); - 
assert_eq!(wl_storage.storage.block.epoch, epoch_before); - assert_eq!(wl_storage.storage.update_epoch_blocks_delay, Some(2)); + state.update_epoch(height_of_update, time_of_update).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before); + assert_eq!(state.in_mem().update_epoch_blocks_delay, Some(2)); // Increment the block height and time to simulate new blocks now let height_of_update = height_of_update + 1; let time_of_update = time_of_update + Duration::seconds(1); - wl_storage.update_epoch(height_of_update, time_of_update).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before); - assert_eq!(wl_storage.storage.update_epoch_blocks_delay, Some(1)); + state.update_epoch(height_of_update, time_of_update).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before); + assert_eq!(state.in_mem().update_epoch_blocks_delay, Some(1)); let height_of_update = height_of_update + 1; let time_of_update = time_of_update + Duration::seconds(1); - wl_storage.update_epoch(height_of_update, time_of_update).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before.next()); - assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); + state.update_epoch(height_of_update, time_of_update).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before.next()); + assert!(state.in_mem().update_epoch_blocks_delay.is_none()); // The next epoch's minimum duration should change - assert_eq!(wl_storage.storage.next_epoch_min_start_height, + assert_eq!(state.in_mem().next_epoch_min_start_height, height_of_update + parameters.epoch_duration.min_num_of_blocks); - assert_eq!(wl_storage.storage.next_epoch_min_start_time, + assert_eq!(state.in_mem().next_epoch_min_start_time, time_of_update + parameters.epoch_duration.min_duration); // Increment the block height and time once more to make sure things reset let height_of_update = height_of_update + 1; let time_of_update = time_of_update + Duration::seconds(1); - wl_storage.update_epoch(height_of_update, 
time_of_update).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before.next()); + state.update_epoch(height_of_update, time_of_update).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before.next()); } } @@ -1405,10 +882,10 @@ mod tests { #[test] fn test_writing_without_merklizing_or_diffs() { - let mut wls = TestWlStorage::default(); - assert_eq!(wls.storage.block.height.0, 0); + let mut state = TestState::default(); + assert_eq!(state.in_mem().block.height.0, 0); - (wls.storage.merkle_tree_key_filter) = merkle_tree_key_filter; + (state.0.merkle_tree_key_filter) = merkle_tree_key_filter; let key1 = test_key_1(); let val1 = 1u64; @@ -1416,60 +893,59 @@ mod tests { let val2 = 2u64; // Standard write of key-val-1 - wls.write(&key1, val1).unwrap(); + state.write(&key1, val1).unwrap(); - // Read from WlStorage should return val1 - let res = wls.read::(&key1).unwrap().unwrap(); + // Read from State should return val1 + let res = state.read::(&key1).unwrap().unwrap(); assert_eq!(res, val1); - // Read from Storage shouldn't return val1 bc the block hasn't been + // Read from DB shouldn't return val1 bc the block hasn't been // committed - let (res, _) = wls.storage.read(&key1).unwrap(); + let (res, _) = state.db_read(&key1).unwrap(); assert!(res.is_none()); // Write key-val-2 without merklizing or diffs - wls.write(&key2, val2).unwrap(); + state.write(&key2, val2).unwrap(); - // Read from WlStorage should return val2 - let res = wls.read::(&key2).unwrap().unwrap(); + // Read from state should return val2 + let res = state.read::(&key2).unwrap().unwrap(); assert_eq!(res, val2); // Commit block and storage changes - wls.commit_block().unwrap(); - wls.storage.block.height = wls.storage.block.height.next_height(); + state.commit_block().unwrap(); + state.in_mem_mut().block.height = + state.in_mem().block.height.next_height(); - // Read key1 from Storage should return val1 - let (res1, _) = wls.storage.read(&key1).unwrap(); + // Read key1 from DB should 
return val1 + let (res1, _) = state.db_read(&key1).unwrap(); let res1 = u64::try_from_slice(&res1.unwrap()).unwrap(); assert_eq!(res1, val1); // Check merkle tree inclusion of key-val-1 explicitly - let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); + let is_merklized1 = state.in_mem().block.tree.has_key(&key1).unwrap(); assert!(is_merklized1); // Key2 should be in storage. Confirm by reading from - // WlStorage and also by reading Storage subspace directly - let res2 = wls.read::(&key2).unwrap().unwrap(); + // state and also by reading DB subspace directly + let res2 = state.read::(&key2).unwrap().unwrap(); assert_eq!(res2, val2); - let res2 = wls.storage.db.read_subspace_val(&key2).unwrap().unwrap(); + let res2 = state.db().read_subspace_val(&key2).unwrap().unwrap(); let res2 = u64::try_from_slice(&res2).unwrap(); assert_eq!(res2, val2); // Check explicitly that key-val-2 is not in merkle tree - let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + let is_merklized2 = state.in_mem().block.tree.has_key(&key2).unwrap(); assert!(!is_merklized2); // Check that the proper diffs exist for key-val-1 - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, Default::default(), true) .unwrap(); assert!(res1.is_none()); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, Default::default(), false) .unwrap() .unwrap(); @@ -1478,15 +954,13 @@ mod tests { // Check that there are diffs for key-val-2 in block 0, since all keys // need to have diffs for at least 1 block for rollback purposes - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, BlockHeight(0), true) .unwrap(); assert!(res2.is_none()); - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, BlockHeight(0), false) .unwrap() .unwrap(); @@ -1494,88 +968,373 @@ mod tests { assert_eq!(res2, val2); // Now delete the keys properly - wls.delete(&key1).unwrap(); - 
wls.delete(&key2).unwrap(); + state.delete(&key1).unwrap(); + state.delete(&key2).unwrap(); // Commit the block again - wls.commit_block().unwrap(); - wls.storage.block.height = wls.storage.block.height.next_height(); + state.commit_block().unwrap(); + state.in_mem_mut().block.height = + state.in_mem().block.height.next_height(); // Check the key-vals are removed from the storage subspace - let res1 = wls.read::(&key1).unwrap(); - let res2 = wls.read::(&key2).unwrap(); + let res1 = state.read::(&key1).unwrap(); + let res2 = state.read::(&key2).unwrap(); assert!(res1.is_none() && res2.is_none()); - let res1 = wls.storage.db.read_subspace_val(&key1).unwrap(); - let res2 = wls.storage.db.read_subspace_val(&key2).unwrap(); + let res1 = state.db().read_subspace_val(&key1).unwrap(); + let res2 = state.db().read_subspace_val(&key2).unwrap(); assert!(res1.is_none() && res2.is_none()); // Check that the key-vals don't exist in the merkle tree anymore - let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); - let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + let is_merklized1 = state.in_mem().block.tree.has_key(&key1).unwrap(); + let is_merklized2 = state.in_mem().block.tree.has_key(&key2).unwrap(); assert!(!is_merklized1 && !is_merklized2); // Check that key-val-1 diffs are properly updated for blocks 0 and 1 - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, BlockHeight(0), true) .unwrap(); assert!(res1.is_none()); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, BlockHeight(0), false) .unwrap() .unwrap(); let res1 = u64::try_from_slice(&res1).unwrap(); assert_eq!(res1, val1); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, BlockHeight(1), true) .unwrap() .unwrap(); let res1 = u64::try_from_slice(&res1).unwrap(); assert_eq!(res1, val1); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, BlockHeight(1), 
false) .unwrap(); assert!(res1.is_none()); // Check that key-val-2 diffs don't exist for block 0 anymore - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, BlockHeight(0), true) .unwrap(); assert!(res2.is_none()); - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, BlockHeight(0), false) .unwrap(); assert!(res2.is_none()); // Check that the block 1 diffs for key-val-2 include an "old" value of // val2 and no "new" value - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, BlockHeight(1), true) .unwrap() .unwrap(); let res2 = u64::try_from_slice(&res2).unwrap(); assert_eq!(res2, val2); - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, BlockHeight(1), false) .unwrap(); assert!(res2.is_none()); } + + proptest! { + // Generate arb valid input for `test_prefix_iters_aux` + #![proptest_config(Config { + cases: 10, + .. Config::default() + })] + #[test] + fn test_prefix_iters( + key_vals in arb_key_vals(30), + ) { + test_prefix_iters_aux(key_vals) + } + } + + /// Check the `prefix_iter_pre` and `prefix_iter_post` return expected + /// values, generated in the input to this function + fn test_prefix_iters_aux(kvs: Vec>) { + let mut s = TestState::default(); + + // Partition the tx and storage kvs + let (tx_kvs, rest): (Vec<_>, Vec<_>) = kvs + .into_iter() + .partition(|(_key, val)| matches!(val, Level::TxWriteLog(_))); + // Partition the kvs to only apply block level first + let (block_kvs, storage_kvs): (Vec<_>, Vec<_>) = rest + .into_iter() + .partition(|(_key, val)| matches!(val, Level::BlockWriteLog(_))); + + // Apply the kvs in order of the levels + apply_to_state(&mut s, &storage_kvs); + apply_to_state(&mut s, &block_kvs); + apply_to_state(&mut s, &tx_kvs); + + // Collect the expected values in prior state - storage level then block + let mut expected_pre = BTreeMap::new(); + for (key, val) in storage_kvs { + if let Level::Storage(val) = 
val { + expected_pre.insert(key, val); + } + } + for (key, val) in &block_kvs { + if let Level::BlockWriteLog(WlMod::Write(val)) = val { + expected_pre.insert(key.clone(), *val); + } + } + for (key, val) in &block_kvs { + // Deletes have to be applied last + if let Level::BlockWriteLog(WlMod::Delete) = val { + expected_pre.remove(key); + } else if let Level::BlockWriteLog(WlMod::DeletePrefix) = val { + expected_pre.retain(|expected_key, _val| { + // Remove matching prefixes except for VPs + expected_key.is_validity_predicate().is_some() + || expected_key.split_prefix(key).is_none() + }) + } + } + + // Collect the values from prior state prefix iterator + let (iter_pre, _gas) = + iter_prefix_pre(s.write_log(), s.db(), &storage::Key::default()); + let mut read_pre = BTreeMap::new(); + for (key, val, _gas) in iter_pre { + let key = storage::Key::parse(key).unwrap(); + let val: i8 = BorshDeserialize::try_from_slice(&val).unwrap(); + read_pre.insert(key, val); + } + + // A helper for dbg + let keys_to_string = |kvs: &BTreeMap| { + kvs.iter() + .map(|(key, val)| (key.to_string(), *val)) + .collect::>() + }; + dbg!(keys_to_string(&expected_pre), keys_to_string(&read_pre)); + // Clone the prior expected kvs for posterior state check + let mut expected_post = expected_pre.clone(); + itertools::assert_equal(expected_pre, read_pre); + + // Collect the expected values in posterior state - all the levels + for (key, val) in &tx_kvs { + if let Level::TxWriteLog(WlMod::Write(val)) = val { + expected_post.insert(key.clone(), *val); + } + } + for (key, val) in &tx_kvs { + // Deletes have to be applied last + if let Level::TxWriteLog(WlMod::Delete) = val { + expected_post.remove(key); + } else if let Level::TxWriteLog(WlMod::DeletePrefix) = val { + expected_post.retain(|expected_key, _val| { + // Remove matching prefixes except for VPs + expected_key.is_validity_predicate().is_some() + || expected_key.split_prefix(key).is_none() + }) + } + } + + // Collect the values from posterior 
state prefix iterator + let (iter_post, _gas) = + iter_prefix_post(s.write_log(), s.db(), &storage::Key::default()); + let mut read_post = BTreeMap::new(); + for (key, val, _gas) in iter_post { + let key = storage::Key::parse(key).unwrap(); + let val: i8 = BorshDeserialize::try_from_slice(&val).unwrap(); + read_post.insert(key, val); + } + dbg!(keys_to_string(&expected_post), keys_to_string(&read_post)); + itertools::assert_equal(expected_post, read_post); + } + + fn apply_to_state(s: &mut TestState, kvs: &[KeyVal]) { + // Apply writes first + for (key, val) in kvs { + match val { + Level::TxWriteLog(WlMod::Delete | WlMod::DeletePrefix) + | Level::BlockWriteLog(WlMod::Delete | WlMod::DeletePrefix) => { + } + Level::TxWriteLog(WlMod::Write(val)) => { + s.write_log_mut() + .write(key, val.serialize_to_vec()) + .unwrap(); + } + Level::BlockWriteLog(WlMod::Write(val)) => { + s.write_log_mut() + // protocol only writes at block level + .protocol_write(key, val.serialize_to_vec()) + .unwrap(); + } + Level::Storage(val) => { + s.db_write(key, val.serialize_to_vec()).unwrap(); + } + } + } + // Then apply deletions + for (key, val) in kvs { + match val { + Level::TxWriteLog(WlMod::Delete) => { + s.write_log_mut().delete(key).unwrap(); + } + Level::BlockWriteLog(WlMod::Delete) => { + s.delete(key).unwrap(); + } + Level::TxWriteLog(WlMod::DeletePrefix) => { + // Find keys matching the prefix + let keys = namada_storage::iter_prefix_bytes(s, key) + .unwrap() + .map(|res| { + let (key, _val) = res.unwrap(); + key + }) + .collect::>(); + // Delete the matching keys + for key in keys { + // Skip validity predicates which cannot be deleted + if key.is_validity_predicate().is_none() { + s.write_log_mut().delete(&key).unwrap(); + } + } + } + Level::BlockWriteLog(WlMod::DeletePrefix) => { + s.delete_prefix(key).unwrap(); + } + _ => {} + } + } + } + + /// WlStorage key written in the write log or storage + type KeyVal = (storage::Key, Level); + + /// WlStorage write level + 
#[derive(Clone, Copy, Debug)] + enum Level { + TxWriteLog(WlMod), + BlockWriteLog(WlMod), + Storage(VAL), + } + + /// Write log modification + #[derive(Clone, Copy, Debug)] + enum WlMod { + Write(VAL), + Delete, + DeletePrefix, + } + + fn arb_key_vals(len: usize) -> impl Strategy>> { + // Start with some arb. storage key-vals + let storage_kvs = prop::collection::vec( + (storage::testing::arb_key(), any::()), + 1..len, + ) + .prop_map(|kvs| { + kvs.into_iter() + .filter_map(|(key, val)| { + if let DbKeySeg::AddressSeg(Address::Internal( + InternalAddress::EthBridgePool, + )) = key.segments[0] + { + None + } else { + Some((key, Level::Storage(val))) + } + }) + .collect::>() + }); + + // Select some indices to override in write log + let overrides = prop::collection::vec( + (any::(), any::(), any::()), + 1..len / 2, + ); + + // Select some indices to delete + let deletes = prop::collection::vec( + (any::(), any::()), + 1..len / 3, + ); + + // Select some indices to delete prefix + let delete_prefix = prop::collection::vec( + ( + any::(), + any::(), + // An arbitrary number of key segments to drop from a selected + // key to obtain the prefix. Because `arb_key` generates `2..5` + // segments, we can drop one less of its upper bound. 
+ (2_usize..4), + ), + 1..len / 4, + ); + + // Combine them all together + (storage_kvs, overrides, deletes, delete_prefix).prop_map( + |(mut kvs, overrides, deletes, delete_prefix)| { + for (ix, val, is_tx) in overrides { + let (key, _) = ix.get(&kvs); + let wl_mod = WlMod::Write(val); + let lvl = if is_tx { + Level::TxWriteLog(wl_mod) + } else { + Level::BlockWriteLog(wl_mod) + }; + kvs.push((key.clone(), lvl)); + } + for (ix, is_tx) in deletes { + let (key, _) = ix.get(&kvs); + // We have to skip validity predicate keys as they cannot be + // deleted + if key.is_validity_predicate().is_some() { + continue; + } + let wl_mod = WlMod::Delete; + let lvl = if is_tx { + Level::TxWriteLog(wl_mod) + } else { + Level::BlockWriteLog(wl_mod) + }; + kvs.push((key.clone(), lvl)); + } + for (ix, is_tx, num_of_seg_to_drop) in delete_prefix { + let (key, _) = ix.get(&kvs); + let wl_mod = WlMod::DeletePrefix; + let lvl = if is_tx { + Level::TxWriteLog(wl_mod) + } else { + Level::BlockWriteLog(wl_mod) + }; + // Keep at least one segment + let num_of_seg_to_keep = std::cmp::max( + 1, + key.segments + .len() + .checked_sub(num_of_seg_to_drop) + .unwrap_or_default(), + ); + let prefix = storage::Key { + segments: key + .segments + .iter() + .take(num_of_seg_to_keep) + .cloned() + .collect(), + }; + kvs.push((prefix, lvl)); + } + kvs + }, + ) + } } diff --git a/crates/state/src/wl_state.rs b/crates/state/src/wl_state.rs new file mode 100644 index 0000000000..c90403ea3c --- /dev/null +++ b/crates/state/src/wl_state.rs @@ -0,0 +1,1192 @@ +use std::cmp::Ordering; +use std::ops::{Deref, DerefMut}; + +use namada_core::address::Address; +use namada_core::borsh::BorshSerializeExt; +use namada_core::chain::ChainId; +use namada_core::storage; +use namada_core::time::DateTimeUtc; +use namada_parameters::EpochDuration; +use namada_replay_protection as replay_protection; +use namada_storage::conversion_state::{ConversionState, WithConversionState}; +use namada_storage::{BlockHeight, 
BlockStateRead, BlockStateWrite, ResultExt}; + +use crate::in_memory::InMemory; +use crate::write_log::{ + self, ReProtStorageModification, StorageModification, WriteLog, +}; +use crate::{ + is_pending_transfer_key, DBIter, Epoch, Error, Hash, Key, LastBlock, + MembershipProof, MerkleTree, MerkleTreeError, ProofOps, Result, State, + StateRead, StorageHasher, StorageResult, StoreType, DB, + EPOCH_SWITCH_BLOCKS_DELAY, STORAGE_ACCESS_GAS_PER_BYTE, + STORAGE_WRITE_GAS_PER_BYTE, +}; + +/// Owned state with full R/W access. +#[derive(Debug)] +pub struct FullAccessState(pub(crate) WlState) +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher; + +/// State with a write-logged storage. +#[derive(Debug)] +pub struct WlState +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Write log + pub(crate) write_log: WriteLog, + /// DB (usually a MockDB or PersistentDB) + /// In public API this is immutable in WlState (only mutable in + /// `FullAccessState`). + pub(crate) db: D, + /// State in memory + pub(crate) in_mem: InMemory, + /// Static merkle tree storage key filter + pub merkle_tree_key_filter: fn(&storage::Key) -> bool, +} + +/// State with a temporary write log. This is used for dry-running txs and ABCI +/// prepare and process proposal, which must not modify the actual state. 
+#[derive(Debug)] +pub struct TempWlState<'a, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Write log + pub(crate) write_log: WriteLog, + // DB + pub(crate) db: &'a D, + /// State + pub(crate) in_mem: &'a InMemory, +} + +impl FullAccessState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + pub fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.0.write_log + } + + pub fn in_mem_mut(&mut self) -> &mut InMemory { + &mut self.0.in_mem + } + + pub fn db_mut(&mut self) -> &mut D { + &mut self.0.db + } + + pub fn restrict_writes_to_write_log(&mut self) -> &mut WlState { + &mut self.0 + } + + pub fn read_only(&self) -> &WlState { + &self.0 + } + + pub fn open( + db_path: impl AsRef, + cache: Option<&D::Cache>, + chain_id: ChainId, + native_token: Address, + storage_read_past_height_limit: Option, + merkle_tree_key_filter: fn(&storage::Key) -> bool, + ) -> Self { + let write_log = WriteLog::default(); + let db = D::open(db_path, cache); + let in_mem = InMemory::new( + chain_id, + native_token, + storage_read_past_height_limit, + ); + let mut state = Self(WlState { + write_log, + db, + in_mem, + merkle_tree_key_filter, + }); + state.load_last_state(); + state + } + + #[allow(dead_code)] + /// Check if the given address exists on chain and return the gas cost. + pub fn db_exists(&self, addr: &Address) -> Result<(bool, u64)> { + let key = storage::Key::validity_predicate(addr); + self.db_has_key(&key) + } + + /// Initialize a new epoch when the current epoch is finished. Returns + /// `true` on a new epoch. + pub fn update_epoch( + &mut self, + height: BlockHeight, + time: DateTimeUtc, + ) -> StorageResult { + let parameters = namada_parameters::read(self) + .expect("Couldn't read protocol parameters"); + + match self.in_mem.update_epoch_blocks_delay.as_mut() { + None => { + // Check if the new epoch minimum start height and start time + // have been fulfilled. 
If so, queue the next + // epoch to start two blocks into the future so + // as to align validator set updates + etc with + // tendermint. This is because tendermint has a two block delay + // to validator changes. + let current_epoch_duration_satisfied = height + >= self.in_mem.next_epoch_min_start_height + && time >= self.in_mem.next_epoch_min_start_time; + if current_epoch_duration_satisfied { + self.in_mem.update_epoch_blocks_delay = + Some(EPOCH_SWITCH_BLOCKS_DELAY); + } + } + Some(blocks_until_switch) => { + *blocks_until_switch -= 1; + } + }; + let new_epoch = + matches!(self.in_mem.update_epoch_blocks_delay, Some(0)); + + if new_epoch { + // Reset the delay tracker + self.in_mem.update_epoch_blocks_delay = None; + + // Begin a new epoch + self.in_mem.block.epoch = self.in_mem.block.epoch.next(); + let EpochDuration { + min_num_of_blocks, + min_duration, + } = parameters.epoch_duration; + self.in_mem.next_epoch_min_start_height = + height + min_num_of_blocks; + self.in_mem.next_epoch_min_start_time = time + min_duration; + + self.in_mem.block.pred_epochs.new_epoch(height); + tracing::info!("Began a new epoch {}", self.in_mem.block.epoch); + } + Ok(new_epoch) + } + + /// Commit the current block's write log to the storage and commit the block + /// to DB. Starts a new block write log. + pub fn commit_block(&mut self) -> StorageResult<()> { + if self.in_mem.last_epoch != self.in_mem.block.epoch { + self.in_mem_mut() + .update_epoch_in_merkle_tree() + .into_storage_result()?; + } + + let mut batch = D::batch(); + self.commit_write_log_block(&mut batch) + .into_storage_result()?; + self.commit_block_from_batch(batch).into_storage_result() + } + + /// Commit the current block's write log to the storage. Starts a new block + /// write log. 
+ pub fn commit_write_log_block( + &mut self, + batch: &mut D::WriteBatch, + ) -> Result<()> { + for (key, entry) in + std::mem::take(&mut self.0.write_log.block_write_log).into_iter() + { + match entry { + StorageModification::Write { value } => { + self.batch_write_subspace_val(batch, &key, value)?; + } + StorageModification::Delete => { + self.batch_delete_subspace_val(batch, &key)?; + } + StorageModification::InitAccount { vp_code_hash } => { + self.batch_write_subspace_val(batch, &key, vp_code_hash)?; + } + // temporary value isn't persisted + StorageModification::Temp { .. } => {} + } + } + debug_assert!(self.0.write_log.block_write_log.is_empty()); + + // Replay protections specifically + for (hash, entry) in + std::mem::take(&mut self.0.write_log.replay_protection).into_iter() + { + match entry { + ReProtStorageModification::Write => self + .write_replay_protection_entry( + batch, + // Can only write tx hashes to the previous block, no + // further + &replay_protection::last_key(&hash), + )?, + ReProtStorageModification::Delete => self + .delete_replay_protection_entry( + batch, + // Can only delete tx hashes from the previous block, + // no further + &replay_protection::last_key(&hash), + )?, + ReProtStorageModification::Finalize => { + self.write_replay_protection_entry( + batch, + &replay_protection::all_key(&hash), + )?; + self.delete_replay_protection_entry( + batch, + &replay_protection::last_key(&hash), + )?; + } + } + } + debug_assert!(self.0.write_log.replay_protection.is_empty()); + + if let Some(address_gen) = self.0.write_log.address_gen.take() { + self.0.in_mem.address_gen = address_gen + } + Ok(()) + } + + /// Start write batch. + pub fn batch() -> D::WriteBatch { + D::batch() + } + + /// Execute write batch. + pub fn exec_batch(&mut self, batch: D::WriteBatch) -> Result<()> { + Ok(self.db.exec_batch(batch)?) + } + + /// Batch write the value with the given height and account subspace key to + /// the DB. 
Returns the size difference from previous value, if any, or + /// the size of the value otherwise. + pub fn batch_write_subspace_val( + &mut self, + batch: &mut D::WriteBatch, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result { + let value = value.as_ref(); + let is_key_merklized = (self.merkle_tree_key_filter)(key); + + if is_pending_transfer_key(key) { + // The tree of the bridge pool stores the current height for the + // pending transfer + let height = self.in_mem.block.height.serialize_to_vec(); + self.in_mem.block.tree.update(key, height)?; + } else { + // Update the merkle tree + if is_key_merklized { + self.in_mem.block.tree.update(key, value)?; + } + } + Ok(self.db.batch_write_subspace_val( + batch, + self.in_mem.block.height, + key, + value, + is_key_merklized, + )?) + } + + /// Batch delete the value with the given height and account subspace key + /// from the DB. Returns the size of the removed value, if any, 0 if no + /// previous value was found. + pub fn batch_delete_subspace_val( + &mut self, + batch: &mut D::WriteBatch, + key: &Key, + ) -> Result { + let is_key_merklized = (self.merkle_tree_key_filter)(key); + // Update the merkle tree + if is_key_merklized { + self.in_mem.block.tree.delete(key)?; + } + Ok(self.db.batch_delete_subspace_val( + batch, + self.in_mem.block.height, + key, + is_key_merklized, + )?) + } + + // Prune merkle tree stores. Use after updating self.block.height in the + // commit. 
+ fn prune_merkle_tree_stores( + &mut self, + batch: &mut D::WriteBatch, + ) -> Result<()> { + if self.in_mem.block.epoch.0 == 0 { + return Ok(()); + } + // Prune non-provable stores at the previous epoch + for st in StoreType::iter_non_provable() { + self.0.db.prune_merkle_tree_store( + batch, + st, + self.in_mem.block.epoch.prev(), + )?; + } + // Prune provable stores + let oldest_epoch = self.in_mem.get_oldest_epoch(); + if oldest_epoch.0 > 0 { + // Remove stores at the previous epoch because the Merkle tree + // stores at the starting height of the epoch would be used to + // restore stores at a height (> oldest_height) in the epoch + for st in StoreType::iter_provable() { + self.db.prune_merkle_tree_store( + batch, + st, + oldest_epoch.prev(), + )?; + } + + // Prune the BridgePool subtree stores with invalid nonce + let mut epoch = match self.get_oldest_epoch_with_valid_nonce()? { + Some(epoch) => epoch, + None => return Ok(()), + }; + while oldest_epoch < epoch { + epoch = epoch.prev(); + self.db.prune_merkle_tree_store( + batch, + &StoreType::BridgePool, + epoch, + )?; + } + } + + Ok(()) + } + + /// Check if the given transaction's hash is already present in storage + pub fn has_replay_protection_entry(&self, hash: &Hash) -> Result { + Ok(self.db.has_replay_protection_entry(hash)?) 
+ } + + /// Write the provided tx hash to storage + pub fn write_replay_protection_entry( + &mut self, + batch: &mut D::WriteBatch, + key: &Key, + ) -> Result<()> { + self.db.write_replay_protection_entry(batch, key)?; + Ok(()) + } + + /// Delete the provided tx hash from storage + pub fn delete_replay_protection_entry( + &mut self, + batch: &mut D::WriteBatch, + key: &Key, + ) -> Result<()> { + self.db.delete_replay_protection_entry(batch, key)?; + Ok(()) + } + + /// Iterate the replay protection storage from the last block + pub fn iter_replay_protection( + &self, + ) -> Box + '_> { + Box::new(self.db.iter_replay_protection().map(|(raw_key, _, _)| { + raw_key.parse().expect("Failed hash conversion") + })) + } + + /// Get oldest epoch which has the valid signed nonce of the bridge pool + fn get_oldest_epoch_with_valid_nonce(&self) -> Result> { + let last_height = self.in_mem.get_last_block_height(); + let current_nonce = match self + .db + .read_bridge_pool_signed_nonce(last_height, last_height)? + { + Some(nonce) => nonce, + None => return Ok(None), + }; + let (mut epoch, _) = self.in_mem.get_last_epoch(); + // We don't need to check the older epochs because their Merkle tree + // snapshots have been already removed + let oldest_epoch = self.in_mem.get_oldest_epoch(); + // Look up the last valid epoch which has the previous nonce of the + // current one. It has the previous nonce, but it was + // incremented during the epoch. + while 0 < epoch.0 && oldest_epoch <= epoch { + epoch = epoch.prev(); + let height = match self + .in_mem + .block + .pred_epochs + .get_start_height_of_epoch(epoch) + { + Some(h) => h, + None => continue, + }; + let nonce = match self + .db + .read_bridge_pool_signed_nonce(height, last_height)? 
+ { + Some(nonce) => nonce, + // skip pruning when the old epoch doesn't have the signed nonce + None => break, + }; + if nonce < current_nonce { + break; + } + } + Ok(Some(epoch)) + } + + /// Rebuild full Merkle tree after [`read_last_block()`] + fn rebuild_full_merkle_tree( + &self, + height: BlockHeight, + ) -> Result> { + self.get_merkle_tree(height, None) + } + + /// Load the full state at the last committed height, if any. Returns the + /// Merkle root hash and the height of the committed block. + fn load_last_state(&mut self) { + if let Some(BlockStateRead { + merkle_tree_stores, + hash, + height, + time, + epoch, + pred_epochs, + next_epoch_min_start_height, + next_epoch_min_start_time, + update_epoch_blocks_delay, + results, + address_gen, + conversion_state, + tx_queue, + ethereum_height, + eth_events_queue, + }) = self + .0 + .db + .read_last_block() + .expect("Read block call must not fail") + { + { + let in_mem = &mut self.0.in_mem; + in_mem.block.hash = hash.clone(); + in_mem.block.height = height; + in_mem.block.epoch = epoch; + in_mem.block.results = results; + in_mem.block.pred_epochs = pred_epochs; + in_mem.last_block = Some(LastBlock { height, hash, time }); + in_mem.last_epoch = epoch; + in_mem.next_epoch_min_start_height = + next_epoch_min_start_height; + in_mem.next_epoch_min_start_time = next_epoch_min_start_time; + in_mem.update_epoch_blocks_delay = update_epoch_blocks_delay; + in_mem.address_gen = address_gen; + } + + // Rebuild Merkle tree - requires the values above to be set first + let tree = MerkleTree::new(merkle_tree_stores) + .or_else(|_| self.rebuild_full_merkle_tree(height)) + .unwrap(); + + let in_mem = &mut self.0.in_mem; + in_mem.block.tree = tree; + in_mem.conversion_state = conversion_state; + in_mem.tx_queue = tx_queue; + in_mem.ethereum_height = ethereum_height; + in_mem.eth_events_queue = eth_events_queue; + tracing::debug!("Loaded storage from DB"); + } else { + tracing::info!("No state could be found"); + } + } + + /// 
Persist the block's state from batch writes to the database. + /// Note that unlike `commit_block` this method doesn't commit the write + /// log. + pub fn commit_block_from_batch( + &mut self, + mut batch: D::WriteBatch, + ) -> Result<()> { + // All states are written only when the first height or a new epoch + let is_full_commit = self.in_mem.block.height.0 == 1 + || self.in_mem.last_epoch != self.in_mem.block.epoch; + + // For convenience in tests, fill-in a header if it's missing. + // Normally, the header is added in `FinalizeBlock`. + #[cfg(any(test, feature = "testing"))] + { + if self.in_mem.header.is_none() { + self.in_mem.header = Some(storage::Header { + hash: Hash::default(), + time: DateTimeUtc::now(), + next_validators_hash: Hash::default(), + }); + } + } + + let state = BlockStateWrite { + merkle_tree_stores: self.in_mem.block.tree.stores(), + header: self.in_mem.header.as_ref(), + hash: &self.in_mem.block.hash, + height: self.in_mem.block.height, + time: self + .in_mem + .header + .as_ref() + .expect("Must have a block header on commit") + .time, + epoch: self.in_mem.block.epoch, + results: &self.in_mem.block.results, + pred_epochs: &self.in_mem.block.pred_epochs, + next_epoch_min_start_height: self + .in_mem + .next_epoch_min_start_height, + next_epoch_min_start_time: self.in_mem.next_epoch_min_start_time, + update_epoch_blocks_delay: self.in_mem.update_epoch_blocks_delay, + address_gen: &self.in_mem.address_gen, + conversion_state: &self.in_mem.conversion_state, + tx_queue: &self.in_mem.tx_queue, + ethereum_height: self.in_mem.ethereum_height.as_ref(), + eth_events_queue: &self.in_mem.eth_events_queue, + }; + self.db + .add_block_to_batch(state, &mut batch, is_full_commit)?; + let header = self + .in_mem + .header + .take() + .expect("Must have a block header on commit"); + self.in_mem.last_block = Some(LastBlock { + height: self.in_mem.block.height, + hash: header.hash.into(), + time: header.time, + }); + self.in_mem.last_epoch = 
self.in_mem.block.epoch; + if is_full_commit { + // prune old merkle tree stores + self.prune_merkle_tree_stores(&mut batch)?; + } + self.db.exec_batch(batch)?; + Ok(()) + } +} + +impl WlState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + pub fn write_log(&self) -> &WriteLog { + &self.write_log + } + + pub fn in_mem(&self) -> &InMemory { + &self.in_mem + } + + pub fn in_mem_mut(&mut self) -> &mut InMemory { + &mut self.in_mem + } + + pub fn db(&self) -> &D { + // NOTE: `WlState` must not be allowed mutable access to DB + &self.db + } + + pub fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.write_log + } + + pub fn with_temp_write_log(&self) -> TempWlState<'_, D, H> { + TempWlState { + write_log: WriteLog::default(), + db: &self.db, + in_mem: &self.in_mem, + } + } + + /// Commit the current transaction's write log to the block when it's + /// accepted by all the triggered validity predicates. Starts a new + /// transaction write log. + pub fn commit_tx(&mut self) { + self.write_log.commit_tx() + } + + /// Drop the current transaction's write log when it's declined by any of + /// the triggered validity predicates. Starts a new transaction write log. + pub fn drop_tx(&mut self) { + self.write_log.drop_tx() + } + + /// Delete the provided transaction's hash from storage. + pub fn delete_tx_hash(&mut self, hash: Hash) -> write_log::Result<()> { + self.write_log.delete_tx_hash(hash) + } + + #[inline] + pub fn get_current_decision_height(&self) -> BlockHeight { + self.in_mem.get_last_block_height() + 1 + } + + /// Check if we are at a given [`BlockHeight`] offset, `height_offset`, + /// within the current epoch. 
+ pub fn is_deciding_offset_within_epoch(&self, height_offset: u64) -> bool { + let current_decision_height = self.get_current_decision_height(); + + let pred_epochs = &self.in_mem.block.pred_epochs; + let fst_heights_of_each_epoch = pred_epochs.first_block_heights(); + + fst_heights_of_each_epoch + .last() + .map(|&h| { + let height_offset_within_epoch = h + height_offset; + current_decision_height == height_offset_within_epoch + }) + .unwrap_or(false) + } + + /// Returns a value from the specified subspace at the given height (or the + /// last committed height when 0) and the gas cost. + pub fn db_read_with_height( + &self, + key: &storage::Key, + height: BlockHeight, + ) -> Result<(Option>, u64)> { + // `0` means last committed height + if height == BlockHeight(0) + || height >= self.in_mem().get_last_block_height() + { + self.db_read(key) + } else { + if !(self.merkle_tree_key_filter)(key) { + return Ok((None, 0)); + } + + match self.db().read_subspace_val_with_height( + key, + height, + self.in_mem().get_last_block_height(), + )? 
{ + Some(v) => { + let gas = (key.len() + v.len()) as u64 + * STORAGE_ACCESS_GAS_PER_BYTE; + Ok((Some(v), gas)) + } + None => { + Ok((None, key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE)) + } + } + } + } + + /// Write a value to the specified subspace and returns the gas cost and the + /// size difference + pub fn db_write( + &mut self, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result<(u64, i64)> { + // Note that this method is the same as `StorageWrite::write_bytes`, + // but with gas and storage bytes len diff accounting + tracing::debug!("storage write key {}", key,); + let value = value.as_ref(); + let is_key_merklized = (self.merkle_tree_key_filter)(key); + + if is_pending_transfer_key(key) { + // The tree of the bridge pool stores the current height for the + // pending transfer + let height = self.in_mem.block.height.serialize_to_vec(); + self.in_mem.block.tree.update(key, height)?; + } else { + // Update the merkle tree + if is_key_merklized { + self.in_mem.block.tree.update(key, value)?; + } + } + + let len = value.len(); + let gas = (key.len() + len) as u64 * STORAGE_WRITE_GAS_PER_BYTE; + let size_diff = self.db.write_subspace_val( + self.in_mem.block.height, + key, + value, + is_key_merklized, + )?; + Ok((gas, size_diff)) + } + + /// Delete the specified subspace and returns the gas cost and the size + /// difference + pub fn db_delete(&mut self, key: &Key) -> Result<(u64, i64)> { + // Note that this method is the same as `StorageWrite::delete`, + // but with gas and storage bytes len diff accounting + let mut deleted_bytes_len = 0; + if self.db_has_key(key)?.0 { + let is_key_merklized = (self.merkle_tree_key_filter)(key); + if is_key_merklized { + self.in_mem.block.tree.delete(key)?; + } + deleted_bytes_len = self.db.delete_subspace_val( + self.in_mem.block.height, + key, + is_key_merklized, + )?; + } + let gas = (key.len() + deleted_bytes_len as usize) as u64 + * STORAGE_WRITE_GAS_PER_BYTE; + Ok((gas, deleted_bytes_len)) + } + + /// Get a 
Tendermint-compatible existence proof. + /// + /// Proofs from the Ethereum bridge pool are not + /// Tendermint-compatible. Requesting for a key + /// belonging to the bridge pool will cause this + /// method to error. + pub fn get_existence_proof( + &self, + key: &Key, + value: namada_merkle_tree::StorageBytes, + height: BlockHeight, + ) -> Result { + use std::array; + + // `0` means last committed height + let height = if height == BlockHeight(0) { + self.in_mem.get_last_block_height() + } else { + height + }; + + if height > self.in_mem.get_last_block_height() { + if let MembershipProof::ICS23(proof) = self + .in_mem + .block + .tree + .get_sub_tree_existence_proof(array::from_ref(key), vec![value]) + .map_err(Error::MerkleTreeError)? + { + self.in_mem + .block + .tree + .get_sub_tree_proof(key, proof) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } else { + Err(Error::MerkleTreeError(MerkleTreeError::TendermintProof)) + } + } else { + let (store_type, _) = StoreType::sub_key(key)?; + let tree = self.get_merkle_tree(height, Some(store_type))?; + if let MembershipProof::ICS23(proof) = tree + .get_sub_tree_existence_proof(array::from_ref(key), vec![value]) + .map_err(Error::MerkleTreeError)? + { + tree.get_sub_tree_proof(key, proof) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } else { + Err(Error::MerkleTreeError(MerkleTreeError::TendermintProof)) + } + } + } + + /// Get the non-existence proof + pub fn get_non_existence_proof( + &self, + key: &Key, + height: BlockHeight, + ) -> Result { + // `0` means last committed height + let height = if height == BlockHeight(0) { + self.in_mem.get_last_block_height() + } else { + height + }; + + if height > self.in_mem.get_last_block_height() { + Err(Error::Temporary { + error: format!( + "The block at the height {} hasn't committed yet", + height, + ), + }) + } else { + let (store_type, _) = StoreType::sub_key(key)?; + self.get_merkle_tree(height, Some(store_type))? 
+ .get_non_existence_proof(key) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } + } + + /// Rebuild Merkle tree with diffs in the DB. + /// Base tree and the specified `store_type` subtree is rebuilt. + /// If `store_type` isn't given, full Merkle tree is restored. + pub fn get_merkle_tree( + &self, + height: BlockHeight, + store_type: Option, + ) -> Result> { + // `0` means last committed height + let height = if height == BlockHeight(0) { + self.in_mem.get_last_block_height() + } else { + height + }; + + let epoch = self + .in_mem + .block + .pred_epochs + .get_epoch(height) + .unwrap_or_default(); + let epoch_start_height = match self + .in_mem + .block + .pred_epochs + .get_start_height_of_epoch(epoch) + { + Some(BlockHeight(0)) => BlockHeight(1), + Some(height) => height, + None => BlockHeight(1), + }; + let stores = self + .db + .read_merkle_tree_stores(epoch, epoch_start_height, store_type)? + .ok_or(Error::NoMerkleTree { height })?; + let prefix = store_type.and_then(|st| st.provable_prefix()); + let mut tree = match store_type { + Some(_) => MerkleTree::::new_partial(stores), + None => MerkleTree::::new(stores).expect("invalid stores"), + }; + // Restore the tree state with diffs + let mut target_height = epoch_start_height; + while target_height < height { + target_height = target_height.next_height(); + let mut old_diff_iter = + self.db.iter_old_diffs(target_height, prefix.as_ref()); + let mut new_diff_iter = + self.db.iter_new_diffs(target_height, prefix.as_ref()); + + let mut old_diff = old_diff_iter.next(); + let mut new_diff = new_diff_iter.next(); + loop { + match (&old_diff, &new_diff) { + (Some(old), Some(new)) => { + let old_key = Key::parse(old.0.clone()) + .expect("the key should be parsable"); + let new_key = Key::parse(new.0.clone()) + .expect("the key should be parsable"); + + // compare keys as String + match old.0.cmp(&new.0) { + Ordering::Equal => { + // the value was updated + if (self.merkle_tree_key_filter)(&new_key) { + 
tree.update( + &new_key, + if is_pending_transfer_key(&new_key) { + target_height.serialize_to_vec() + } else { + new.1.clone() + }, + )?; + } + old_diff = old_diff_iter.next(); + new_diff = new_diff_iter.next(); + } + Ordering::Less => { + // the value was deleted + if (self.merkle_tree_key_filter)(&old_key) { + tree.delete(&old_key)?; + } + old_diff = old_diff_iter.next(); + } + Ordering::Greater => { + // the value was inserted + if (self.merkle_tree_key_filter)(&new_key) { + tree.update( + &new_key, + if is_pending_transfer_key(&new_key) { + target_height.serialize_to_vec() + } else { + new.1.clone() + }, + )?; + } + new_diff = new_diff_iter.next(); + } + } + } + (Some(old), None) => { + // the value was deleted + let key = Key::parse(old.0.clone()) + .expect("the key should be parsable"); + + if (self.merkle_tree_key_filter)(&key) { + tree.delete(&key)?; + } + + old_diff = old_diff_iter.next(); + } + (None, Some(new)) => { + // the value was inserted + let key = Key::parse(new.0.clone()) + .expect("the key should be parsable"); + + if (self.merkle_tree_key_filter)(&key) { + tree.update( + &key, + if is_pending_transfer_key(&key) { + target_height.serialize_to_vec() + } else { + new.1.clone() + }, + )?; + } + + new_diff = new_diff_iter.next(); + } + (None, None) => break, + } + } + } + if let Some(st) = store_type { + // Add the base tree with the given height + let mut stores = self + .db + .read_merkle_tree_stores(epoch, height, Some(StoreType::Base))? 
+ .ok_or(Error::NoMerkleTree { height })?; + let restored_stores = tree.stores(); + // Set the root and store of the rebuilt subtree + stores.set_root(&st, *restored_stores.root(&st)); + stores.set_store(restored_stores.store(&st).to_owned()); + tree = MerkleTree::::new_partial(stores); + } + Ok(tree) + } + + /// Get the timestamp of the last committed block, or the current timestamp + /// if no blocks have been produced yet + pub fn get_last_block_timestamp(&self) -> Result { + let last_block_height = self.in_mem.get_block_height().0; + + Ok(self + .db + .read_block_header(last_block_height)? + .map_or_else(DateTimeUtc::now, |header| header.time)) + } +} + +impl TempWlState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + pub fn write_log(&self) -> &WriteLog { + &self.write_log + } + + pub fn in_mem(&self) -> &InMemory { + self.in_mem + } + + pub fn db(&self) -> &D { + self.db + } + + pub fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.write_log + } + + /// Check if the given tx hash has already been processed + pub fn has_replay_protection_entry(&self, hash: &Hash) -> Result { + if let Some(present) = self.write_log.has_replay_protection_entry(hash) + { + return Ok(present); + } + + self.db() + .has_replay_protection_entry(hash) + .map_err(Error::DbError) + } + + /// Check if the given tx hash has already been committed to storage + pub fn has_committed_replay_protection_entry( + &self, + hash: &Hash, + ) -> Result { + self.db() + .has_replay_protection_entry(hash) + .map_err(Error::DbError) + } +} + +impl StateRead for FullAccessState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + type D = D; + type H = H; + + fn db(&self) -> &D { + &self.0.db + } + + fn in_mem(&self) -> &InMemory { + &self.0.in_mem + } + + fn write_log(&self) -> &WriteLog { + &self.0.write_log + } + + fn charge_gas(&self, _gas: u64) -> Result<()> { + Ok(()) + } +} + +impl State for 
FullAccessState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.0.write_log + } + + fn split_borrow( + &mut self, + ) -> (&mut WriteLog, &InMemory, &Self::D) { + (&mut self.0.write_log, &self.0.in_mem, &self.0.db) + } +} + +impl WithConversionState for FullAccessState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn conversion_state(&self) -> &ConversionState { + &self.in_mem().conversion_state + } + + fn conversion_state_mut(&mut self) -> &mut ConversionState { + &mut self.in_mem_mut().conversion_state + } +} + +impl StateRead for WlState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + type D = D; + type H = H; + + fn write_log(&self) -> &WriteLog { + &self.write_log + } + + fn db(&self) -> &D { + &self.db + } + + fn in_mem(&self) -> &InMemory { + &self.in_mem + } + + fn charge_gas(&self, _gas: u64) -> Result<()> { + Ok(()) + } +} + +impl State for WlState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.write_log + } + + fn split_borrow( + &mut self, + ) -> (&mut WriteLog, &InMemory, &Self::D) { + (&mut self.write_log, &self.in_mem, &self.db) + } +} + +impl StateRead for TempWlState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + type D = D; + type H = H; + + fn write_log(&self) -> &WriteLog { + &self.write_log + } + + fn db(&self) -> &D { + self.db + } + + fn in_mem(&self) -> &InMemory { + self.in_mem + } + + fn charge_gas(&self, _gas: u64) -> Result<()> { + Ok(()) + } +} + +impl State for TempWlState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.write_log + } + + fn split_borrow( + &mut self, + ) -> (&mut WriteLog, &InMemory, 
&Self::D) { + (&mut self.write_log, (self.in_mem), (self.db)) + } +} + +impl Deref for FullAccessState +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + type Target = WlState; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for FullAccessState +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} diff --git a/crates/state/src/wl_storage.rs b/crates/state/src/wl_storage.rs deleted file mode 100644 index e971315a99..0000000000 --- a/crates/state/src/wl_storage.rs +++ /dev/null @@ -1,883 +0,0 @@ -//! Storage with write log. - -use std::iter::Peekable; - -use namada_core::types::address::Address; -use namada_core::types::hash::{Hash, StorageHasher}; -use namada_core::types::storage::{self, BlockHeight, Epochs}; -use namada_core::types::time::DateTimeUtc; -use namada_parameters::EpochDuration; -use namada_storage::{ResultExt, StorageRead, StorageWrite}; - -use super::EPOCH_SWITCH_BLOCKS_DELAY; -use crate::write_log::{self, WriteLog}; -use crate::{DBIter, State, DB}; - -/// Storage with write log that allows to implement prefix iterator that works -/// with changes not yet committed to the DB. -#[derive(Debug)] -pub struct WlStorage -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - /// Write log - pub write_log: WriteLog, - /// Storage provides access to DB - pub storage: State, -} - -/// Temporary storage that can be used for changes that will never be committed -/// to the DB. This is useful for the shell `PrepareProposal` and -/// `ProcessProposal` handlers that should not change state, but need to apply -/// storage changes for replay protection to validate the proposal. 
-#[derive(Debug)] -pub struct TempWlStorage<'a, D, H> -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - /// Write log - pub write_log: WriteLog, - /// Storage provides access to DB - pub storage: &'a State, -} - -impl<'a, D, H> TempWlStorage<'a, D, H> -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - /// Create a temp storage that can mutated in memory, but never committed to - /// DB. - pub fn new(storage: &'a State) -> Self { - Self { - write_log: WriteLog::default(), - storage, - } - } - - /// Check if the given tx hash has already been processed - pub fn has_replay_protection_entry( - &self, - hash: &Hash, - ) -> Result { - if let Some(present) = self.write_log.has_replay_protection_entry(hash) - { - return Ok(present); - } - - self.storage.has_replay_protection_entry(hash) - } - - /// Check if the given tx hash has already been committed to storage - pub fn has_committed_replay_protection_entry( - &self, - hash: &Hash, - ) -> Result { - self.storage.has_replay_protection_entry(hash) - } -} - -/// Common trait for [`WlStorage`] and [`TempWlStorage`], used to implement -/// namada_storage traits. -pub trait WriteLogAndStorage { - /// DB type - type D: DB + for<'iter> DBIter<'iter>; - /// DB hasher type - type H: StorageHasher; - - /// Borrow `WriteLog` - fn write_log(&self) -> &WriteLog; - - /// Borrow mutable `WriteLog` - fn write_log_mut(&mut self) -> &mut WriteLog; - - /// Borrow `Storage` - fn storage(&self) -> &State; - - /// Splitting borrow to get immutable reference to the `Storage` and mutable - /// reference to `WriteLog` when in need of both (avoids complain from the - /// borrow checker) - fn split_borrow(&mut self) -> (&mut WriteLog, &State); - - /// Write the provided tx hash to storage. 
- fn write_tx_hash(&mut self, hash: Hash) -> write_log::Result<()>; -} - -impl WriteLogAndStorage for WlStorage -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - type D = D; - type H = H; - - fn write_log(&self) -> &WriteLog { - &self.write_log - } - - fn write_log_mut(&mut self) -> &mut WriteLog { - &mut self.write_log - } - - fn storage(&self) -> &State { - &self.storage - } - - fn split_borrow(&mut self) -> (&mut WriteLog, &State) { - (&mut self.write_log, &self.storage) - } - - fn write_tx_hash(&mut self, hash: Hash) -> write_log::Result<()> { - self.write_log.write_tx_hash(hash) - } -} - -impl WriteLogAndStorage for TempWlStorage<'_, D, H> -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - type D = D; - type H = H; - - fn write_log(&self) -> &WriteLog { - &self.write_log - } - - fn write_log_mut(&mut self) -> &mut WriteLog { - &mut self.write_log - } - - fn storage(&self) -> &State { - self.storage - } - - fn split_borrow(&mut self) -> (&mut WriteLog, &State) { - (&mut self.write_log, (self.storage)) - } - - fn write_tx_hash(&mut self, hash: Hash) -> write_log::Result<()> { - self.write_log.write_tx_hash(hash) - } -} - -impl WlStorage -where - D: 'static + DB + for<'iter> DBIter<'iter>, - H: 'static + StorageHasher, -{ - /// Combine storage with write-log - pub fn new(write_log: WriteLog, storage: State) -> Self { - Self { write_log, storage } - } - - /// Commit the current transaction's write log to the block when it's - /// accepted by all the triggered validity predicates. Starts a new - /// transaction write log. - pub fn commit_tx(&mut self) { - self.write_log.commit_tx() - } - - /// Drop the current transaction's write log when it's declined by any of - /// the triggered validity predicates. Starts a new transaction write log. - pub fn drop_tx(&mut self) { - self.write_log.drop_tx() - } - - /// Commit the current block's write log to the storage and commit the block - /// to DB. Starts a new block write log. 
- pub fn commit_block(&mut self) -> namada_storage::Result<()> { - if self.storage.last_epoch != self.storage.block.epoch { - self.storage - .update_epoch_in_merkle_tree() - .into_storage_result()?; - } - - let mut batch = D::batch(); - self.write_log - .commit_block(&mut self.storage, &mut batch) - .into_storage_result()?; - self.storage.commit_block(batch).into_storage_result() - } - - /// Initialize a new epoch when the current epoch is finished. Returns - /// `true` on a new epoch. - pub fn update_epoch( - &mut self, - height: BlockHeight, - time: DateTimeUtc, - ) -> storage::Result { - let parameters = namada_parameters::read(self) - .expect("Couldn't read protocol parameters"); - - match self.storage.update_epoch_blocks_delay.as_mut() { - None => { - // Check if the new epoch minimum start height and start time - // have been fulfilled. If so, queue the next - // epoch to start two blocks into the future so - // as to align validator set updates + etc with - // tendermint. This is because tendermint has a two block delay - // to validator changes. 
- let current_epoch_duration_satisfied = height - >= self.storage.next_epoch_min_start_height - && time >= self.storage.next_epoch_min_start_time; - if current_epoch_duration_satisfied { - self.storage.update_epoch_blocks_delay = - Some(EPOCH_SWITCH_BLOCKS_DELAY); - } - } - Some(blocks_until_switch) => { - *blocks_until_switch -= 1; - } - }; - let new_epoch = - matches!(self.storage.update_epoch_blocks_delay, Some(0)); - - if new_epoch { - // Reset the delay tracker - self.storage.update_epoch_blocks_delay = None; - - // Begin a new epoch - self.storage.block.epoch = self.storage.block.epoch.next(); - let EpochDuration { - min_num_of_blocks, - min_duration, - } = parameters.epoch_duration; - self.storage.next_epoch_min_start_height = - height + min_num_of_blocks; - self.storage.next_epoch_min_start_time = time + min_duration; - - self.storage.block.pred_epochs.new_epoch(height); - tracing::info!("Began a new epoch {}", self.storage.block.epoch); - } - Ok(new_epoch) - } - - /// Delete the provided transaction's hash from storage. - pub fn delete_tx_hash(&mut self, hash: Hash) -> write_log::Result<()> { - self.write_log.delete_tx_hash(hash) - } - - #[inline] - pub fn get_current_decision_height(&self) -> BlockHeight { - self.storage.get_last_block_height() + 1 - } - - /// Check if we are at a given [`BlockHeight`] offset, `height_offset`, - /// within the current epoch. - pub fn is_deciding_offset_within_epoch(&self, height_offset: u64) -> bool { - let current_decision_height = self.get_current_decision_height(); - - let pred_epochs = &self.storage.block.pred_epochs; - let fst_heights_of_each_epoch = pred_epochs.first_block_heights(); - - fst_heights_of_each_epoch - .last() - .map(|&h| { - let height_offset_within_epoch = h + height_offset; - current_decision_height == height_offset_within_epoch - }) - .unwrap_or(false) - } -} - -/// Prefix iterator for [`WlStorage`]. 
-#[derive(Debug)] -pub struct PrefixIter<'iter, D> -where - D: DB + DBIter<'iter>, -{ - /// Peekable storage iterator - pub storage_iter: Peekable<>::PrefixIter>, - /// Peekable write log iterator - pub write_log_iter: Peekable, -} - -/// Iterate write-log storage items prior to a tx execution, matching the -/// given prefix. Returns the iterator and gas cost. -pub fn iter_prefix_pre<'iter, D, H>( - // We cannot use e.g. `&'iter WlStorage`, because it doesn't live long - // enough - the lifetime of the `PrefixIter` must depend on the lifetime of - // references to the `WriteLog` and `Storage`. - write_log: &'iter WriteLog, - storage: &'iter State, - prefix: &storage::Key, -) -> (PrefixIter<'iter, D>, u64) -where - D: DB + for<'iter_> DBIter<'iter_>, - H: StorageHasher, -{ - let storage_iter = storage.db.iter_prefix(Some(prefix)).peekable(); - let write_log_iter = write_log.iter_prefix_pre(prefix).peekable(); - ( - PrefixIter { - storage_iter, - write_log_iter, - }, - prefix.len() as u64 * namada_gas::STORAGE_ACCESS_GAS_PER_BYTE, - ) -} - -/// Iterate write-log storage items posterior to a tx execution, matching the -/// given prefix. Returns the iterator and gas cost. -pub fn iter_prefix_post<'iter, D, H>( - // We cannot use e.g. `&'iter WlStorage`, because it doesn't live long - // enough - the lifetime of the `PrefixIter` must depend on the lifetime of - // references to the `WriteLog` and `Storage`. 
- write_log: &'iter WriteLog, - storage: &'iter State, - prefix: &storage::Key, -) -> (PrefixIter<'iter, D>, u64) -where - D: DB + for<'iter_> DBIter<'iter_>, - H: StorageHasher, -{ - let storage_iter = storage.db.iter_prefix(Some(prefix)).peekable(); - let write_log_iter = write_log.iter_prefix_post(prefix).peekable(); - ( - PrefixIter { - storage_iter, - write_log_iter, - }, - prefix.len() as u64 * namada_gas::STORAGE_ACCESS_GAS_PER_BYTE, - ) -} - -impl<'iter, D> Iterator for PrefixIter<'iter, D> -where - D: DB + DBIter<'iter>, -{ - type Item = (String, Vec, u64); - - fn next(&mut self) -> Option { - enum Next { - ReturnWl { advance_storage: bool }, - ReturnStorage, - } - loop { - let what: Next; - { - let storage_peeked = self.storage_iter.peek(); - let wl_peeked = self.write_log_iter.peek(); - match (storage_peeked, wl_peeked) { - (None, None) => return None, - (None, Some(_)) => { - what = Next::ReturnWl { - advance_storage: false, - }; - } - (Some(_), None) => { - what = Next::ReturnStorage; - } - (Some((storage_key, _, _)), Some((wl_key, _))) => { - if wl_key <= storage_key { - what = Next::ReturnWl { - advance_storage: wl_key == storage_key, - }; - } else { - what = Next::ReturnStorage; - } - } - } - } - match what { - Next::ReturnWl { advance_storage } => { - if advance_storage { - let _ = self.storage_iter.next(); - } - - if let Some((key, modification)) = - self.write_log_iter.next() - { - match modification { - write_log::StorageModification::Write { value } - | write_log::StorageModification::Temp { value } => { - let gas = value.len() as u64; - return Some((key, value, gas)); - } - write_log::StorageModification::InitAccount { - vp_code_hash, - } => { - let gas = vp_code_hash.len() as u64; - return Some((key, vp_code_hash.to_vec(), gas)); - } - write_log::StorageModification::Delete => { - continue; - } - } - } - } - Next::ReturnStorage => { - if let Some(next) = self.storage_iter.next() { - return Some(next); - } - } - } - } - } -} - -#[macro_export] 
-macro_rules! impl_storage_traits { - ($($type:ty)*) => { - impl StorageRead for $($type)* - where - D: 'static + DB + for<'iter> DBIter<'iter>, - H: 'static + StorageHasher, - { - type PrefixIter<'iter> = PrefixIter<'iter, D> where Self: 'iter; - - fn read_bytes( - &self, - key: &storage::Key, - ) -> namada_storage::Result>> { - // try to read from the write log first - let (log_val, _gas) = self.write_log().read(key); - match log_val { - Some(write_log::StorageModification::Write { ref value }) => { - Ok(Some(value.clone())) - } - Some(write_log::StorageModification::Delete) => Ok(None), - Some(write_log::StorageModification::InitAccount { - ref vp_code_hash, - }) => Ok(Some(vp_code_hash.to_vec())), - Some(write_log::StorageModification::Temp { ref value }) => { - Ok(Some(value.clone())) - } - None => { - // when not found in write log, try to read from the storage - self.storage() - .db - .read_subspace_val(key) - .into_storage_result() - } - } - } - - fn has_key(&self, key: &storage::Key) -> namada_storage::Result { - // try to read from the write log first - let (log_val, _gas) = self.write_log().read(key); - match log_val { - Some(&write_log::StorageModification::Write { .. }) - | Some(&write_log::StorageModification::InitAccount { .. }) - | Some(&write_log::StorageModification::Temp { .. 
}) => Ok(true), - Some(&write_log::StorageModification::Delete) => { - // the given key has been deleted - Ok(false) - } - None => { - // when not found in write log, try to check the storage - Ok(self.storage().has_key(key).into_storage_result()?.0) - } - } - } - - fn iter_prefix<'iter>( - &'iter self, - prefix: &storage::Key, - ) -> namada_storage::Result> { - let (iter, _gas) = - iter_prefix_post(self.write_log(), self.storage(), prefix); - Ok(iter) - } - - fn iter_next<'iter>( - &'iter self, - iter: &mut Self::PrefixIter<'iter>, - ) -> namada_storage::Result)>> { - Ok(iter.next().map(|(key, val, _gas)| (key, val))) - } - - fn get_chain_id( - &self, - ) -> std::result::Result { - Ok(self.storage().chain_id.to_string()) - } - - fn get_block_height( - &self, - ) -> std::result::Result { - Ok(self.storage().block.height) - } - - fn get_block_header( - &self, - height: storage::BlockHeight, - ) -> std::result::Result, namada_storage::Error> - { - self.storage() - .db - .read_block_header(height) - .into_storage_result() - } - - fn get_block_hash( - &self, - ) -> std::result::Result { - Ok(self.storage().block.hash.clone()) - } - - fn get_block_epoch( - &self, - ) -> std::result::Result { - Ok(self.storage().block.epoch) - } - - fn get_pred_epochs(&self) -> namada_storage::Result { - Ok(self.storage().block.pred_epochs.clone()) - } - - fn get_tx_index( - &self, - ) -> std::result::Result { - Ok(self.storage().tx_index) - } - - fn get_native_token(&self) -> namada_storage::Result
{ - Ok(self.storage().native_token.clone()) - } - } - - impl StorageWrite for $($type)* - where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, - { - // N.B. Calling this when testing pre- and post- reads in - // regards to testing native vps is incorrect. - fn write_bytes( - &mut self, - key: &storage::Key, - val: impl AsRef<[u8]>, - ) -> namada_storage::Result<()> { - let _ = self - .write_log_mut() - .protocol_write(key, val.as_ref().to_vec()) - .into_storage_result(); - Ok(()) - } - - fn delete(&mut self, key: &storage::Key) -> namada_storage::Result<()> { - let _ = self - .write_log_mut() - .protocol_delete(key) - .into_storage_result(); - Ok(()) - } - } - }; -} -impl_storage_traits!(WlStorage); -impl_storage_traits!(TempWlStorage<'_, D, H>); - -#[cfg(test)] -mod tests { - use std::collections::BTreeMap; - - use namada_core::borsh::{BorshDeserialize, BorshSerializeExt}; - use namada_core::types::address::InternalAddress; - use namada_core::types::storage::DbKeySeg; - use proptest::prelude::*; - use proptest::test_runner::Config; - // Use `RUST_LOG=info` (or another tracing level) and `--nocapture` to - // see `tracing` logs from tests - use test_log::test; - - use super::*; - use crate::testing::TestWlStorage; - - proptest! { - // Generate arb valid input for `test_prefix_iters_aux` - #![proptest_config(Config { - cases: 10, - .. 
Config::default() - })] - #[test] - fn test_prefix_iters( - key_vals in arb_key_vals(30), - ) { - test_prefix_iters_aux(key_vals) - } - } - - /// Check the `prefix_iter_pre` and `prefix_iter_post` return expected - /// values, generated in the input to this function - fn test_prefix_iters_aux(kvs: Vec>) { - let mut s = TestWlStorage::default(); - - // Partition the tx and storage kvs - let (tx_kvs, rest): (Vec<_>, Vec<_>) = kvs - .into_iter() - .partition(|(_key, val)| matches!(val, Level::TxWriteLog(_))); - // Partition the kvs to only apply block level first - let (block_kvs, storage_kvs): (Vec<_>, Vec<_>) = rest - .into_iter() - .partition(|(_key, val)| matches!(val, Level::BlockWriteLog(_))); - - // Apply the kvs in order of the levels - apply_to_wl_storage(&mut s, &storage_kvs); - apply_to_wl_storage(&mut s, &block_kvs); - apply_to_wl_storage(&mut s, &tx_kvs); - - // Collect the expected values in prior state - storage level then block - let mut expected_pre = BTreeMap::new(); - for (key, val) in storage_kvs { - if let Level::Storage(val) = val { - expected_pre.insert(key, val); - } - } - for (key, val) in &block_kvs { - if let Level::BlockWriteLog(WlMod::Write(val)) = val { - expected_pre.insert(key.clone(), *val); - } - } - for (key, val) in &block_kvs { - // Deletes have to be applied last - if let Level::BlockWriteLog(WlMod::Delete) = val { - expected_pre.remove(key); - } else if let Level::BlockWriteLog(WlMod::DeletePrefix) = val { - expected_pre.retain(|expected_key, _val| { - // Remove matching prefixes except for VPs - expected_key.is_validity_predicate().is_some() - || expected_key.split_prefix(key).is_none() - }) - } - } - - // Collect the values from prior state prefix iterator - let (iter_pre, _gas) = - iter_prefix_pre(&s.write_log, &s.storage, &storage::Key::default()); - let mut read_pre = BTreeMap::new(); - for (key, val, _gas) in iter_pre { - let key = storage::Key::parse(key).unwrap(); - let val: i8 = 
BorshDeserialize::try_from_slice(&val).unwrap(); - read_pre.insert(key, val); - } - - // A helper for dbg - let keys_to_string = |kvs: &BTreeMap| { - kvs.iter() - .map(|(key, val)| (key.to_string(), *val)) - .collect::>() - }; - dbg!(keys_to_string(&expected_pre), keys_to_string(&read_pre)); - // Clone the prior expected kvs for posterior state check - let mut expected_post = expected_pre.clone(); - itertools::assert_equal(expected_pre, read_pre); - - // Collect the expected values in posterior state - all the levels - for (key, val) in &tx_kvs { - if let Level::TxWriteLog(WlMod::Write(val)) = val { - expected_post.insert(key.clone(), *val); - } - } - for (key, val) in &tx_kvs { - // Deletes have to be applied last - if let Level::TxWriteLog(WlMod::Delete) = val { - expected_post.remove(key); - } else if let Level::TxWriteLog(WlMod::DeletePrefix) = val { - expected_post.retain(|expected_key, _val| { - // Remove matching prefixes except for VPs - expected_key.is_validity_predicate().is_some() - || expected_key.split_prefix(key).is_none() - }) - } - } - - // Collect the values from posterior state prefix iterator - let (iter_post, _gas) = iter_prefix_post( - &s.write_log, - &s.storage, - &storage::Key::default(), - ); - let mut read_post = BTreeMap::new(); - for (key, val, _gas) in iter_post { - let key = storage::Key::parse(key).unwrap(); - let val: i8 = BorshDeserialize::try_from_slice(&val).unwrap(); - read_post.insert(key, val); - } - dbg!(keys_to_string(&expected_post), keys_to_string(&read_post)); - itertools::assert_equal(expected_post, read_post); - } - - fn apply_to_wl_storage(s: &mut TestWlStorage, kvs: &[KeyVal]) { - // Apply writes first - for (key, val) in kvs { - match val { - Level::TxWriteLog(WlMod::Delete | WlMod::DeletePrefix) - | Level::BlockWriteLog(WlMod::Delete | WlMod::DeletePrefix) => { - } - Level::TxWriteLog(WlMod::Write(val)) => { - s.write_log.write(key, val.serialize_to_vec()).unwrap(); - } - Level::BlockWriteLog(WlMod::Write(val)) => { - 
s.write_log - // protocol only writes at block level - .protocol_write(key, val.serialize_to_vec()) - .unwrap(); - } - Level::Storage(val) => { - s.storage.write(key, val.serialize_to_vec()).unwrap(); - } - } - } - // Then apply deletions - for (key, val) in kvs { - match val { - Level::TxWriteLog(WlMod::Delete) => { - s.write_log.delete(key).unwrap(); - } - Level::BlockWriteLog(WlMod::Delete) => { - s.delete(key).unwrap(); - } - Level::TxWriteLog(WlMod::DeletePrefix) => { - // Find keys matching the prefix - let keys = namada_storage::iter_prefix_bytes(s, key) - .unwrap() - .map(|res| { - let (key, _val) = res.unwrap(); - key - }) - .collect::>(); - // Delete the matching keys - for key in keys { - // Skip validity predicates which cannot be deleted - if key.is_validity_predicate().is_none() { - s.write_log.delete(&key).unwrap(); - } - } - } - Level::BlockWriteLog(WlMod::DeletePrefix) => { - s.delete_prefix(key).unwrap(); - } - _ => {} - } - } - } - - /// WlStorage key written in the write log or storage - type KeyVal = (storage::Key, Level); - - /// WlStorage write level - #[derive(Clone, Copy, Debug)] - enum Level { - TxWriteLog(WlMod), - BlockWriteLog(WlMod), - Storage(VAL), - } - - /// Write log modification - #[derive(Clone, Copy, Debug)] - enum WlMod { - Write(VAL), - Delete, - DeletePrefix, - } - - fn arb_key_vals(len: usize) -> impl Strategy>> { - // Start with some arb. 
storage key-vals - let storage_kvs = prop::collection::vec( - (storage::testing::arb_key(), any::()), - 1..len, - ) - .prop_map(|kvs| { - kvs.into_iter() - .filter_map(|(key, val)| { - if let DbKeySeg::AddressSeg(Address::Internal( - InternalAddress::EthBridgePool, - )) = key.segments[0] - { - None - } else { - Some((key, Level::Storage(val))) - } - }) - .collect::>() - }); - - // Select some indices to override in write log - let overrides = prop::collection::vec( - (any::(), any::(), any::()), - 1..len / 2, - ); - - // Select some indices to delete - let deletes = prop::collection::vec( - (any::(), any::()), - 1..len / 3, - ); - - // Select some indices to delete prefix - let delete_prefix = prop::collection::vec( - ( - any::(), - any::(), - // An arbitrary number of key segments to drop from a selected - // key to obtain the prefix. Because `arb_key` generates `2..5` - // segments, we can drop one less of its upper bound. - (2_usize..4), - ), - 1..len / 4, - ); - - // Combine them all together - (storage_kvs, overrides, deletes, delete_prefix).prop_map( - |(mut kvs, overrides, deletes, delete_prefix)| { - for (ix, val, is_tx) in overrides { - let (key, _) = ix.get(&kvs); - let wl_mod = WlMod::Write(val); - let lvl = if is_tx { - Level::TxWriteLog(wl_mod) - } else { - Level::BlockWriteLog(wl_mod) - }; - kvs.push((key.clone(), lvl)); - } - for (ix, is_tx) in deletes { - let (key, _) = ix.get(&kvs); - // We have to skip validity predicate keys as they cannot be - // deleted - if key.is_validity_predicate().is_some() { - continue; - } - let wl_mod = WlMod::Delete; - let lvl = if is_tx { - Level::TxWriteLog(wl_mod) - } else { - Level::BlockWriteLog(wl_mod) - }; - kvs.push((key.clone(), lvl)); - } - for (ix, is_tx, num_of_seg_to_drop) in delete_prefix { - let (key, _) = ix.get(&kvs); - let wl_mod = WlMod::DeletePrefix; - let lvl = if is_tx { - Level::TxWriteLog(wl_mod) - } else { - Level::BlockWriteLog(wl_mod) - }; - // Keep at least one segment - let 
num_of_seg_to_keep = std::cmp::max( - 1, - key.segments - .len() - .checked_sub(num_of_seg_to_drop) - .unwrap_or_default(), - ); - let prefix = storage::Key { - segments: key - .segments - .iter() - .take(num_of_seg_to_keep) - .cloned() - .collect(), - }; - kvs.push((prefix, lvl)); - } - kvs - }, - ) - } -} diff --git a/crates/state/src/write_log.rs b/crates/state/src/write_log.rs index 17de478659..6ce1f31d54 100644 --- a/crates/state/src/write_log.rs +++ b/crates/state/src/write_log.rs @@ -4,13 +4,10 @@ use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use itertools::Itertools; -use namada_core::ledger::replay_protection; -use namada_core::types::address::{ - Address, EstablishedAddressGen, InternalAddress, -}; -use namada_core::types::hash::{Hash, StorageHasher}; -use namada_core::types::ibc::IbcEvent; -use namada_core::types::storage; +use namada_core::address::{Address, EstablishedAddressGen, InternalAddress}; +use namada_core::hash::Hash; +use namada_core::ibc::IbcEvent; +use namada_core::storage; use namada_gas::{MEMORY_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE}; use namada_trans_token::storage_key::{ is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, @@ -18,8 +15,6 @@ use namada_trans_token::storage_key::{ }; use thiserror::Error; -use crate::{DBIter, State, DB}; - #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { @@ -70,7 +65,7 @@ pub enum StorageModification { #[derive(Debug, Clone, PartialEq, Eq)] /// A replay protection storage modification -enum ReProtStorageModification { +pub(crate) enum ReProtStorageModification { /// Write an entry Write, /// Delete an entry @@ -83,12 +78,12 @@ enum ReProtStorageModification { #[derive(Debug, Clone, PartialEq, Eq)] pub struct WriteLog { /// The generator of established addresses - address_gen: Option, + pub(crate) address_gen: Option, /// All the storage modification accepted by validity predicates are stored /// in block write-log, before being committed to the 
storage - block_write_log: HashMap, + pub(crate) block_write_log: HashMap, /// The storage modifications for the current transaction - tx_write_log: HashMap, + pub(crate) tx_write_log: HashMap, /// A precommit bucket for the `tx_write_log`. This is useful for /// validation when a clean `tx_write_log` is needed without committing any /// modification already in there. These modifications can be temporarily @@ -97,12 +92,13 @@ pub struct WriteLog { /// write/update/delete should ever happen on this field, this log should /// only be populated through a dump of the `tx_write_log` and should be /// cleaned either when committing or dumping the `tx_write_log` - tx_precommit_write_log: HashMap, + pub(crate) tx_precommit_write_log: + HashMap, /// The IBC events for the current transaction - ibc_events: BTreeSet, + pub(crate) ibc_events: BTreeSet, /// Storage modifications for the replay protection storage, always /// committed regardless of the result of the transaction - replay_protection: HashMap, + pub(crate) replay_protection: HashMap, } /// Write log prefix iterator @@ -493,83 +489,6 @@ impl WriteLog { self.tx_write_log.clear(); } - /// Commit the current block's write log to the storage. Starts a new block - /// write log. - pub fn commit_block( - &mut self, - storage: &mut State, - batch: &mut D::WriteBatch, - ) -> Result<()> - where - D: 'static + DB + for<'iter> DBIter<'iter>, - H: StorageHasher, - { - for (key, entry) in self.block_write_log.iter() { - match entry { - StorageModification::Write { value } => { - storage - .batch_write_subspace_val(batch, key, value.clone()) - .map_err(Error::StorageError)?; - } - StorageModification::Delete => { - storage - .batch_delete_subspace_val(batch, key) - .map_err(Error::StorageError)?; - } - StorageModification::InitAccount { vp_code_hash } => { - storage - .batch_write_subspace_val(batch, key, *vp_code_hash) - .map_err(Error::StorageError)?; - } - // temporary value isn't persisted - StorageModification::Temp { .. 
} => {} - } - } - - // Replay protections specifically - for (hash, entry) in self.replay_protection.iter() { - match entry { - ReProtStorageModification::Write => storage - .write_replay_protection_entry( - batch, - // Can only write tx hashes to the previous block, no - // further - &replay_protection::last_key(hash), - ) - .map_err(Error::StorageError)?, - ReProtStorageModification::Delete => storage - .delete_replay_protection_entry( - batch, - // Can only delete tx hashes from the previous block, - // no further - &replay_protection::last_key(hash), - ) - .map_err(Error::StorageError)?, - ReProtStorageModification::Finalize => { - storage - .write_replay_protection_entry( - batch, - &replay_protection::all_key(hash), - ) - .map_err(Error::StorageError)?; - storage - .delete_replay_protection_entry( - batch, - &replay_protection::last_key(hash), - ) - .map_err(Error::StorageError)? - } - } - } - - if let Some(address_gen) = self.address_gen.take() { - storage.address_gen = address_gen - } - self.block_write_log.clear(); - self.replay_protection.clear(); - Ok(()) - } - /// Get the verifiers set whose validity predicates should validate the /// current transaction changes and the storage keys that have been /// modified created, updated and deleted via the write log. 
@@ -671,7 +590,7 @@ impl WriteLog { } /// Write the transaction hash - pub(crate) fn write_tx_hash(&mut self, hash: Hash) -> Result<()> { + pub fn write_tx_hash(&mut self, hash: Hash) -> Result<()> { if self .replay_protection .insert(hash, ReProtStorageModification::Write) @@ -688,7 +607,7 @@ impl WriteLog { } /// Remove the transaction hash - pub(crate) fn delete_tx_hash(&mut self, hash: Hash) -> Result<()> { + pub fn delete_tx_hash(&mut self, hash: Hash) -> Result<()> { match self .replay_protection .insert(hash, ReProtStorageModification::Delete) @@ -731,12 +650,12 @@ impl WriteLog { #[cfg(test)] mod tests { use assert_matches::assert_matches; - use namada_core::types::hash::Hash; - use namada_core::types::{address, storage}; + use namada_core::address; use pretty_assertions::assert_eq; use proptest::prelude::*; use super::*; + use crate::StateRead; #[test] fn test_crud_value() { @@ -897,9 +816,7 @@ mod tests { #[test] fn test_commit() { - let mut storage = crate::testing::TestStorage::default(); - let mut write_log = WriteLog::default(); - let mut batch = crate::testing::TestStorage::batch(); + let mut state = crate::testing::TestState::default(); let address_gen = EstablishedAddressGen::new("test"); let key1 = @@ -913,134 +830,132 @@ mod tests { // initialize an account let vp1 = Hash::sha256("vp1".as_bytes()); - let (addr1, _) = write_log.init_account(&address_gen, vp1); - write_log.commit_tx(); + let (addr1, _) = state.write_log.init_account(&address_gen, vp1); + state.write_log.commit_tx(); // write values let val1 = "val1".as_bytes().to_vec(); - write_log.write(&key1, val1.clone()).unwrap(); - write_log.write(&key2, val1.clone()).unwrap(); - write_log.write(&key3, val1.clone()).unwrap(); - write_log.write_temp(&key4, val1.clone()).unwrap(); - write_log.commit_tx(); + state.write_log.write(&key1, val1.clone()).unwrap(); + state.write_log.write(&key2, val1.clone()).unwrap(); + state.write_log.write(&key3, val1.clone()).unwrap(); + 
state.write_log.write_temp(&key4, val1.clone()).unwrap(); + state.write_log.commit_tx(); // these values are not written due to drop_tx let val2 = "val2".as_bytes().to_vec(); - write_log.write(&key1, val2.clone()).unwrap(); - write_log.write(&key2, val2.clone()).unwrap(); - write_log.write(&key3, val2).unwrap(); - write_log.drop_tx(); + state.write_log.write(&key1, val2.clone()).unwrap(); + state.write_log.write(&key2, val2.clone()).unwrap(); + state.write_log.write(&key3, val2).unwrap(); + state.write_log.drop_tx(); // deletes and updates values let val3 = "val3".as_bytes().to_vec(); - write_log.delete(&key2).unwrap(); - write_log.write(&key3, val3.clone()).unwrap(); - write_log.commit_tx(); + state.write_log.delete(&key2).unwrap(); + state.write_log.write(&key3, val3.clone()).unwrap(); + state.write_log.commit_tx(); // commit a block - write_log - .commit_block(&mut storage, &mut batch) - .expect("commit failed"); + state.commit_block().expect("commit failed"); let (vp_code_hash, _gas) = - storage.validity_predicate(&addr1).expect("vp read failed"); + state.validity_predicate(&addr1).expect("vp read failed"); assert_eq!(vp_code_hash, Some(vp1)); - let (value, _) = storage.read(&key1).expect("read failed"); + let (value, _) = state.db_read(&key1).expect("read failed"); assert_eq!(value.expect("no read value"), val1); - let (value, _) = storage.read(&key2).expect("read failed"); + let (value, _) = state.db_read(&key2).expect("read failed"); assert!(value.is_none()); - let (value, _) = storage.read(&key3).expect("read failed"); + let (value, _) = state.db_read(&key3).expect("read failed"); assert_eq!(value.expect("no read value"), val3); - let (value, _) = storage.read(&key4).expect("read failed"); + let (value, _) = state.db_read(&key4).expect("read failed"); assert_eq!(value, None); } #[test] fn test_replay_protection_commit() { - let mut storage = crate::testing::TestStorage::default(); - let mut write_log = WriteLog::default(); - let mut batch = 
crate::testing::TestStorage::batch(); + let mut state = crate::testing::TestState::default(); - // write some replay protection keys - write_log - .write_tx_hash(Hash::sha256("tx1".as_bytes())) - .unwrap(); - write_log - .write_tx_hash(Hash::sha256("tx2".as_bytes())) - .unwrap(); - write_log - .write_tx_hash(Hash::sha256("tx3".as_bytes())) - .unwrap(); + { + let write_log = state.write_log_mut(); + // write some replay protection keys + write_log + .write_tx_hash(Hash::sha256("tx1".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx2".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx3".as_bytes())) + .unwrap(); + } // commit a block - write_log - .commit_block(&mut storage, &mut batch) - .expect("commit failed"); + state.commit_block().expect("commit failed"); - assert!(write_log.replay_protection.is_empty()); + assert!(state.write_log.replay_protection.is_empty()); for tx in ["tx1", "tx2", "tx3"] { + let hash = Hash::sha256(tx.as_bytes()); assert!( - storage - .has_replay_protection_entry(&Hash::sha256(tx.as_bytes())) + state + .has_replay_protection_entry(&hash) .expect("read failed") ); } - // write some replay protection keys - write_log - .write_tx_hash(Hash::sha256("tx4".as_bytes())) - .unwrap(); - write_log - .write_tx_hash(Hash::sha256("tx5".as_bytes())) - .unwrap(); - write_log - .write_tx_hash(Hash::sha256("tx6".as_bytes())) - .unwrap(); - - // delete previous hash - write_log - .delete_tx_hash(Hash::sha256("tx1".as_bytes())) - .unwrap(); + { + let write_log = state.write_log_mut(); + // write some replay protection keys + write_log + .write_tx_hash(Hash::sha256("tx4".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx5".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx6".as_bytes())) + .unwrap(); - // finalize previous hashes - for tx in ["tx2", "tx3"] { + // delete previous hash write_log - .finalize_tx_hash(Hash::sha256(tx.as_bytes())) + 
.delete_tx_hash(Hash::sha256("tx1".as_bytes())) .unwrap(); + + // finalize previous hashes + for tx in ["tx2", "tx3"] { + write_log + .finalize_tx_hash(Hash::sha256(tx.as_bytes())) + .unwrap(); + } } // commit a block - write_log - .commit_block(&mut storage, &mut batch) - .expect("commit failed"); + state.commit_block().expect("commit failed"); - assert!(write_log.replay_protection.is_empty()); + assert!(state.write_log.replay_protection.is_empty()); for tx in ["tx2", "tx3", "tx4", "tx5", "tx6"] { assert!( - storage + state .has_replay_protection_entry(&Hash::sha256(tx.as_bytes())) .expect("read failed") ); } assert!( - !storage + !state .has_replay_protection_entry(&Hash::sha256("tx1".as_bytes())) .expect("read failed") ); // try to delete finalized hash which shouldn't work - write_log + state + .write_log .delete_tx_hash(Hash::sha256("tx2".as_bytes())) .unwrap(); // commit a block - write_log - .commit_block(&mut storage, &mut batch) - .expect("commit failed"); + state.commit_block().expect("commit failed"); - assert!(write_log.replay_protection.is_empty()); + assert!(state.write_log.replay_protection.is_empty()); assert!( - storage + state .has_replay_protection_entry(&Hash::sha256("tx2".as_bytes())) .expect("read failed") ); @@ -1108,9 +1023,9 @@ mod tests { /// Helpers for testing with write log. 
#[cfg(any(test, feature = "testing"))] pub mod testing { - use namada_core::types::address::testing::arb_address; - use namada_core::types::hash::HASH_LENGTH; - use namada_core::types::storage::testing::arb_key; + use namada_core::address::testing::arb_address; + use namada_core::hash::HASH_LENGTH; + use namada_core::storage::testing::arb_key; use proptest::collection; use proptest::prelude::{any, prop_oneof, Just, Strategy}; diff --git a/crates/storage/Cargo.toml b/crates/storage/Cargo.toml index 17ee879e6b..12f75ba063 100644 --- a/crates/storage/Cargo.toml +++ b/crates/storage/Cargo.toml @@ -22,6 +22,7 @@ testing = [ namada_core = { path = "../core" } namada_gas = { path = "../gas" } namada_merkle_tree = { path = "../merkle_tree" } +namada_replay_protection = { path = "../replay_protection" } namada_tx = { path = "../tx" } borsh.workspace = true diff --git a/crates/storage/src/collections/lazy_map.rs b/crates/storage/src/collections/lazy_map.rs index f2d24f7dd1..7c7ea1c2b9 100644 --- a/crates/storage/src/collections/lazy_map.rs +++ b/crates/storage/src/collections/lazy_map.rs @@ -6,7 +6,7 @@ use std::hash::Hash; use std::marker::PhantomData; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::storage::{self, DbKeySeg, KeySeg}; +use namada_core::storage::{self, DbKeySeg, KeySeg}; use thiserror::Error; use super::super::Result; @@ -539,7 +539,7 @@ where #[cfg(test)] mod test { - use namada_core::types::address::{self, Address}; + use namada_core::address::{self, Address}; use super::*; use crate::testing::TestStorage; diff --git a/crates/storage/src/collections/lazy_set.rs b/crates/storage/src/collections/lazy_set.rs index ab4e41705b..bee96d41a5 100644 --- a/crates/storage/src/collections/lazy_set.rs +++ b/crates/storage/src/collections/lazy_set.rs @@ -3,7 +3,7 @@ use std::fmt::Debug; use std::marker::PhantomData; -use namada_core::types::storage::{self, DbKeySeg, KeySeg}; +use namada_core::storage::{self, DbKeySeg, KeySeg}; use 
thiserror::Error; use super::super::Result; @@ -213,7 +213,7 @@ where #[cfg(test)] mod test { - use namada_core::types::address::{self, Address}; + use namada_core::address::{self, Address}; use super::*; use crate::testing::TestStorage; diff --git a/crates/storage/src/collections/lazy_vec.rs b/crates/storage/src/collections/lazy_vec.rs index 12398a3f23..2826620fd7 100644 --- a/crates/storage/src/collections/lazy_vec.rs +++ b/crates/storage/src/collections/lazy_vec.rs @@ -4,7 +4,7 @@ use std::fmt::Debug; use std::marker::PhantomData; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::storage::{self, DbKeySeg, KeySeg}; +use namada_core::storage::{self, DbKeySeg, KeySeg}; use thiserror::Error; use super::super::Result; @@ -284,7 +284,7 @@ where #[cfg(test)] mod test { - use namada_core::types::address::{self, Address}; + use namada_core::address::{self, Address}; use super::*; use crate::collections::lazy_map::{self, NestedMap}; diff --git a/crates/storage/src/collections/mod.rs b/crates/storage/src/collections/mod.rs index 8de1f05092..4cc9ca9aea 100644 --- a/crates/storage/src/collections/mod.rs +++ b/crates/storage/src/collections/mod.rs @@ -19,7 +19,7 @@ pub mod lazy_vec; pub use lazy_map::LazyMap; pub use lazy_set::LazySet; pub use lazy_vec::LazyVec; -use namada_core::types::storage; +use namada_core::storage; #[allow(missing_docs)] #[derive(Error, Debug)] diff --git a/crates/storage/src/conversion_state.rs b/crates/storage/src/conversion_state.rs new file mode 100644 index 0000000000..135b63bab5 --- /dev/null +++ b/crates/storage/src/conversion_state.rs @@ -0,0 +1,43 @@ +//! 
Shielded tokens conversion state + +use std::collections::BTreeMap; + +use namada_core::address::Address; +use namada_core::borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::masp_primitives::asset_type::AssetType; +use namada_core::masp_primitives::convert::AllowedConversion; +use namada_core::masp_primitives::merkle_tree::FrozenCommitmentTree; +use namada_core::masp_primitives::sapling; +use namada_core::storage::Epoch; +use namada_core::token::{Denomination, MaspDigitPos}; + +/// A representation of the conversion state +#[derive(Debug, Default, BorshSerialize, BorshDeserialize)] +pub struct ConversionState { + /// The last amount of the native token distributed + pub normed_inflation: Option, + /// The tree currently containing all the conversions + pub tree: FrozenCommitmentTree, + /// A map from token alias to actual address. + pub tokens: BTreeMap, + /// Map assets to their latest conversion and position in Merkle tree + #[allow(clippy::type_complexity)] + pub assets: BTreeMap< + AssetType, + ( + (Address, Denomination, MaspDigitPos), + Epoch, + AllowedConversion, + usize, + ), + >, +} + +/// Able to borrow mutable conversion state. 
+pub trait WithConversionState { + /// Borrow immutable conversion state + fn conversion_state(&self) -> &ConversionState; + + /// Borrow mutable conversion state + fn conversion_state_mut(&mut self) -> &mut ConversionState; +} diff --git a/crates/storage/src/db.rs b/crates/storage/src/db.rs index 7eef20b918..5ce22e85db 100644 --- a/crates/storage/src/db.rs +++ b/crates/storage/src/db.rs @@ -1,20 +1,20 @@ use std::fmt::Debug; -use namada_core::types::address::EstablishedAddressGen; -use namada_core::types::hash::{Error as HashError, Hash}; -use namada_core::types::storage::{ +use namada_core::address::EstablishedAddressGen; +use namada_core::hash::{Error as HashError, Hash}; +use namada_core::storage::{ BlockHash, BlockHeight, BlockResults, Epoch, Epochs, EthEventsQueue, Header, Key, }; -use namada_core::types::time::DateTimeUtc; -use namada_core::types::token::ConversionState; -use namada_core::types::{ethereum_events, ethereum_structs}; +use namada_core::time::DateTimeUtc; +use namada_core::{ethereum_events, ethereum_structs}; use namada_merkle_tree::{ Error as MerkleTreeError, MerkleTreeStoresRead, MerkleTreeStoresWrite, StoreType, }; use thiserror::Error; +use crate::conversion_state::ConversionState; use crate::tx_queue::TxQueue; #[allow(missing_docs)] @@ -25,9 +25,9 @@ pub enum Error { #[error("Found an unknown key: {key}")] UnknownKey { key: String }, #[error("Storage key error {0}")] - KeyError(namada_core::types::storage::Error), + KeyError(namada_core::storage::Error), #[error("Coding error: {0}")] - CodingError(#[from] namada_core::types::DecodeError), + CodingError(#[from] namada_core::DecodeError), #[error("Merkle tree error: {0}")] MerkleTreeError(#[from] MerkleTreeError), #[error("DB error: {0}")] diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index 334e51e50e..e39bb485a2 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -2,6 +2,7 @@ //! and VPs (both native and WASM). 
pub mod collections; +pub mod conversion_state; mod db; mod error; pub mod mockdb; @@ -10,12 +11,10 @@ pub mod types; pub use db::{Error as DbError, Result as DbResult, *}; pub use error::{CustomError, Error, OptionExt, Result, ResultExt}; +use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; -use namada_core::types::address::Address; -pub use namada_core::types::hash::StorageHasher; -use namada_core::types::storage::{ - self, BlockHash, BlockHeight, Epoch, Epochs, Header, TxIndex, -}; +pub use namada_core::hash::StorageHasher; +pub use namada_core::storage::*; /// Common storage read interface /// @@ -39,10 +38,7 @@ pub trait StorageRead { /// Storage read Borsh encoded value. It will try to read from the storage /// and decode it if found. - fn read( - &self, - key: &storage::Key, - ) -> Result> { + fn read(&self, key: &Key) -> Result> { let bytes = self.read_bytes(key)?; match bytes { Some(bytes) => { @@ -54,10 +50,10 @@ pub trait StorageRead { } /// Storage read raw bytes. It will try to read from the storage. - fn read_bytes(&self, key: &storage::Key) -> Result>>; + fn read_bytes(&self, key: &Key) -> Result>>; /// Storage `has_key` in. It will try to read from the storage. - fn has_key(&self, key: &storage::Key) -> Result; + fn has_key(&self, key: &Key) -> Result; /// Storage prefix iterator ordered by the storage keys. It will try to get /// an iterator from the storage. @@ -66,7 +62,7 @@ pub trait StorageRead { /// [`fn@iter_prefix_bytes`] instead. fn iter_prefix<'iter>( &'iter self, - prefix: &storage::Key, + prefix: &Key, ) -> Result>; /// Storage prefix iterator. It will try to read from the storage. @@ -134,27 +130,19 @@ pub trait StorageRead { /// Common storage write interface pub trait StorageWrite { /// Write a value to be encoded with Borsh at the given key to storage. 
- fn write( - &mut self, - key: &storage::Key, - val: T, - ) -> Result<()> { + fn write(&mut self, key: &Key, val: T) -> Result<()> { let bytes = val.serialize_to_vec(); self.write_bytes(key, bytes) } /// Write a value as bytes at the given key to storage. - fn write_bytes( - &mut self, - key: &storage::Key, - val: impl AsRef<[u8]>, - ) -> Result<()>; + fn write_bytes(&mut self, key: &Key, val: impl AsRef<[u8]>) -> Result<()>; /// Delete a value at the given key from storage. - fn delete(&mut self, key: &storage::Key) -> Result<()>; + fn delete(&mut self, key: &Key) -> Result<()>; /// Delete all key-vals with a matching prefix. - fn delete_prefix(&mut self, prefix: &storage::Key) -> Result<()> + fn delete_prefix(&mut self, prefix: &Key) -> Result<()> where Self: StorageRead + Sized, { @@ -163,7 +151,7 @@ pub trait StorageWrite { let (key, _val) = res?; Ok(key) }) - .collect::>>(); + .collect::>>(); for key in keys? { // Skip validity predicates as they cannot be deleted if key.is_validity_predicate().is_none() { @@ -177,13 +165,13 @@ pub trait StorageWrite { /// Iterate items matching the given prefix, ordered by the storage keys. pub fn iter_prefix_bytes<'a>( storage: &'a impl StorageRead, - prefix: &storage::Key, -) -> Result)>> + 'a> { + prefix: &Key, +) -> Result)>> + 'a> { let iter = storage.iter_prefix(prefix)?; let iter = itertools::unfold(iter, |iter| { match storage.iter_next(iter) { Ok(Some((key, val))) => { - let key = match storage::Key::parse(key).into_storage_result() { + let key = match Key::parse(key).into_storage_result() { Ok(key) => key, Err(err) => { // Propagate key encoding errors into Iterator's Item @@ -206,8 +194,8 @@ pub fn iter_prefix_bytes<'a>( /// storage keys. 
pub fn iter_prefix<'a, T>( storage: &'a impl StorageRead, - prefix: &storage::Key, -) -> Result> + 'a> + prefix: &Key, +) -> Result> + 'a> where T: BorshDeserialize, { @@ -215,7 +203,7 @@ where let iter = itertools::unfold(iter, |iter| { match storage.iter_next(iter) { Ok(Some((key, val))) => { - let key = match storage::Key::parse(key).into_storage_result() { + let key = match Key::parse(key).into_storage_result() { Ok(key) => key, Err(err) => { // Propagate key encoding errors into Iterator's Item @@ -252,12 +240,12 @@ where /// don't pass the filter. For `iter_prefix_bytes`, `filter` works fine. pub fn iter_prefix_with_filter<'a, T, F>( storage: &'a impl StorageRead, - prefix: &storage::Key, + prefix: &Key, filter: F, -) -> Result> + 'a> +) -> Result> + 'a> where T: BorshDeserialize, - F: Fn(&storage::Key) -> bool + 'a, + F: Fn(&Key) -> bool + 'a, { let iter = storage.iter_prefix(prefix)?; let iter = itertools::unfold(iter, move |iter| { @@ -266,15 +254,14 @@ where loop { match storage.iter_next(iter) { Ok(Some((key, val))) => { - let key = - match storage::Key::parse(key).into_storage_result() { - Ok(key) => key, - Err(err) => { - // Propagate key encoding errors into Iterator's - // Item - return Some(Err(err)); - } - }; + let key = match Key::parse(key).into_storage_result() { + Ok(key) => key, + Err(err) => { + // Propagate key encoding errors into Iterator's + // Item + return Some(Err(err)); + } + }; // Check the predicate if !filter(&key) { continue; @@ -304,12 +291,13 @@ where /// Helpers for testing components that depend on storage #[cfg(any(test, feature = "testing"))] pub mod testing { - - use namada_core::types::address; - use namada_core::types::chain::ChainId; + use namada_core::address; + use namada_core::chain::ChainId; + pub use namada_core::storage::testing::*; use super::mockdb::MockDB; use super::*; + use crate::conversion_state::{ConversionState, WithConversionState}; /// Storage with a mock DB for testing pub struct TestStorage { @@ 
-319,10 +307,11 @@ pub mod testing { epoch: Epoch, pred_epochs: Epochs, native_token: Address, - merkle_tree_key_filter: fn(&storage::Key) -> bool, + conversion_state: ConversionState, + merkle_tree_key_filter: fn(&Key) -> bool, } - fn merklize_all_keys(_key: &storage::Key) -> bool { + fn merklize_all_keys(_key: &Key) -> bool { true } @@ -335,7 +324,8 @@ pub mod testing { height: BlockHeight::first(), epoch: Epoch::default(), pred_epochs: Epochs::default(), - native_token: address::nam(), + native_token: address::testing::nam(), + conversion_state: ConversionState::default(), merkle_tree_key_filter: merklize_all_keys, } } @@ -344,17 +334,17 @@ pub mod testing { impl StorageRead for TestStorage { type PrefixIter<'iter> = PrefixIter<'iter> where Self: 'iter; - fn read_bytes(&self, key: &storage::Key) -> Result>> { + fn read_bytes(&self, key: &Key) -> Result>> { self.db.read_subspace_val(key).into_storage_result() } - fn has_key(&self, key: &storage::Key) -> Result { + fn has_key(&self, key: &Key) -> Result { Ok(self.read_bytes(key)?.is_some()) } fn iter_prefix<'iter>( &'iter self, - prefix: &storage::Key, + prefix: &Key, ) -> Result> { let storage_iter = self.db.iter_prefix(Some(prefix)); Ok(PrefixIter { @@ -408,7 +398,7 @@ pub mod testing { impl StorageWrite for TestStorage { fn write_bytes( &mut self, - key: &storage::Key, + key: &Key, val: impl AsRef<[u8]>, ) -> Result<()> { let is_key_merklized = (self.merkle_tree_key_filter)(key); @@ -418,7 +408,7 @@ pub mod testing { Ok(()) } - fn delete(&mut self, key: &storage::Key) -> Result<()> { + fn delete(&mut self, key: &Key) -> Result<()> { let is_key_merklized = (self.merkle_tree_key_filter)(key); self.db .delete_subspace_val(self.height, key, is_key_merklized) @@ -427,6 +417,16 @@ pub mod testing { } } + impl WithConversionState for TestStorage { + fn conversion_state(&self) -> &ConversionState { + &self.conversion_state + } + + fn conversion_state_mut(&mut self) -> &mut ConversionState { + &mut self.conversion_state 
+ } + } + /// Prefix iterator for [`TestStorage`]. #[derive(Debug)] pub struct PrefixIter<'iter> { diff --git a/crates/storage/src/mockdb.rs b/crates/storage/src/mockdb.rs index c23801536c..4128321e54 100644 --- a/crates/storage/src/mockdb.rs +++ b/crates/storage/src/mockdb.rs @@ -8,20 +8,19 @@ use std::str::FromStr; use itertools::Either; use namada_core::borsh::{BorshDeserialize, BorshSerializeExt}; -use namada_core::ledger::replay_protection; -use namada_core::types; -use namada_core::types::hash::Hash; -use namada_core::types::storage::{ +use namada_core::hash::Hash; +use namada_core::storage::{ BlockHeight, BlockResults, Epoch, EthEventsQueue, Header, Key, KeySeg, KEY_SEGMENT_SEPARATOR, }; -use namada_core::types::time::DateTimeUtc; -use namada_core::types::token::ConversionState; -use namada_core::types::{ethereum_events, ethereum_structs}; +use namada_core::time::DateTimeUtc; +use namada_core::{decode, encode, ethereum_events, ethereum_structs}; use namada_merkle_tree::{ base_tree_key_prefix, subtree_key_prefix, MerkleTreeStoresRead, StoreType, }; +use namada_replay_protection as replay_protection; +use crate::conversion_state::ConversionState; use crate::db::{ BlockStateRead, BlockStateWrite, DBIter, DBWriteBatch, Error, Result, DB, }; @@ -67,66 +66,52 @@ impl DB for MockDB { fn read_last_block(&self) -> Result> { // Block height let height: BlockHeight = match self.0.borrow().get("height") { - Some(bytes) => types::decode(bytes).map_err(Error::CodingError)?, + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => return Ok(None), }; // Block results let results_path = format!("results/{}", height.raw()); let results: BlockResults = match self.0.borrow().get(results_path.as_str()) { - Some(bytes) => { - types::decode(bytes).map_err(Error::CodingError)? 
- } + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => return Ok(None), }; // Epoch start height and time let next_epoch_min_start_height: BlockHeight = match self.0.borrow().get("next_epoch_min_start_height") { - Some(bytes) => { - types::decode(bytes).map_err(Error::CodingError)? - } + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => return Ok(None), }; let next_epoch_min_start_time: DateTimeUtc = match self.0.borrow().get("next_epoch_min_start_time") { - Some(bytes) => { - types::decode(bytes).map_err(Error::CodingError)? - } + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => return Ok(None), }; let update_epoch_blocks_delay: Option = match self.0.borrow().get("update_epoch_blocks_delay") { - Some(bytes) => { - types::decode(bytes).map_err(Error::CodingError)? - } + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => return Ok(None), }; let conversion_state: ConversionState = match self.0.borrow().get("conversion_state") { - Some(bytes) => { - types::decode(bytes).map_err(Error::CodingError)? - } + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => return Ok(None), }; let tx_queue: TxQueue = match self.0.borrow().get("tx_queue") { - Some(bytes) => types::decode(bytes).map_err(Error::CodingError)?, + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => return Ok(None), }; let ethereum_height: Option = match self.0.borrow().get("ethereum_height") { - Some(bytes) => { - types::decode(bytes).map_err(Error::CodingError)? - } + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => return Ok(None), }; let eth_events_queue: EthEventsQueue = match self.0.borrow().get("ethereum_height") { - Some(bytes) => { - types::decode(bytes).map_err(Error::CodingError)? 
- } + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => return Ok(None), }; @@ -154,7 +139,7 @@ impl DB for MockDB { match segments.get(3) { Some(&"root") => merkle_tree_stores.set_root( &st, - types::decode(bytes) + decode(bytes) .map_err(Error::CodingError)?, ), Some(&"store") => merkle_tree_stores @@ -168,29 +153,21 @@ impl DB for MockDB { // the block header doesn't have to be restored } "hash" => { - hash = Some( - types::decode(bytes).map_err(Error::CodingError)?, - ) + hash = Some(decode(bytes).map_err(Error::CodingError)?) } "time" => { - time = Some( - types::decode(bytes).map_err(Error::CodingError)?, - ) + time = Some(decode(bytes).map_err(Error::CodingError)?) } "epoch" => { - epoch = Some( - types::decode(bytes).map_err(Error::CodingError)?, - ) + epoch = Some(decode(bytes).map_err(Error::CodingError)?) } "pred_epochs" => { - pred_epochs = Some( - types::decode(bytes).map_err(Error::CodingError)?, - ) + pred_epochs = + Some(decode(bytes).map_err(Error::CodingError)?) 
} "address_gen" => { - address_gen = Some( - types::decode(bytes).map_err(Error::CodingError)?, - ); + address_gen = + Some(decode(bytes).map_err(Error::CodingError)?); } _ => unknown_key_error(path)?, }, @@ -207,7 +184,7 @@ impl DB for MockDB { { merkle_tree_stores.set_root( st, - types::decode(bytes).map_err(Error::CodingError)?, + decode(bytes).map_err(Error::CodingError)?, ); } let store_key = prefix_key.with_segment("store".to_owned()); @@ -276,29 +253,28 @@ impl DB for MockDB { // Epoch start height and time self.0.borrow_mut().insert( "next_epoch_min_start_height".into(), - types::encode(&next_epoch_min_start_height), + encode(&next_epoch_min_start_height), ); self.0.borrow_mut().insert( "next_epoch_min_start_time".into(), - types::encode(&next_epoch_min_start_time), + encode(&next_epoch_min_start_time), ); self.0.borrow_mut().insert( "update_epoch_blocks_delay".into(), - types::encode(&update_epoch_blocks_delay), + encode(&update_epoch_blocks_delay), ); self.0 .borrow_mut() - .insert("ethereum_height".into(), types::encode(ðereum_height)); - self.0.borrow_mut().insert( - "eth_events_queue".into(), - types::encode(ð_events_queue), - ); + .insert("ethereum_height".into(), encode(ðereum_height)); + self.0 + .borrow_mut() + .insert("eth_events_queue".into(), encode(ð_events_queue)); self.0 .borrow_mut() - .insert("tx_queue".into(), types::encode(&tx_queue)); + .insert("tx_queue".into(), encode(&tx_queue)); self.0 .borrow_mut() - .insert("conversion_state".into(), types::encode(conversion_state)); + .insert("conversion_state".into(), encode(conversion_state)); let prefix_key = Key::from(height.to_db_key()); // Merkle tree @@ -314,7 +290,7 @@ impl DB for MockDB { key_prefix.clone().with_segment("root".to_owned()); self.0.borrow_mut().insert( root_key.to_string(), - types::encode(merkle_tree_stores.root(st)), + encode(merkle_tree_stores.root(st)), ); let store_key = key_prefix.with_segment("store".to_owned()); self.0.borrow_mut().insert( @@ -340,27 +316,21 @@ impl 
DB for MockDB { let key = prefix_key .push(&"hash".to_owned()) .map_err(Error::KeyError)?; - self.0 - .borrow_mut() - .insert(key.to_string(), types::encode(&hash)); + self.0.borrow_mut().insert(key.to_string(), encode(&hash)); } // Block time { let key = prefix_key .push(&"time".to_owned()) .map_err(Error::KeyError)?; - self.0 - .borrow_mut() - .insert(key.to_string(), types::encode(&time)); + self.0.borrow_mut().insert(key.to_string(), encode(&time)); } // Block epoch { let key = prefix_key .push(&"epoch".to_owned()) .map_err(Error::KeyError)?; - self.0 - .borrow_mut() - .insert(key.to_string(), types::encode(&epoch)); + self.0.borrow_mut().insert(key.to_string(), encode(&epoch)); } // Predecessor block epochs { @@ -369,7 +339,7 @@ impl DB for MockDB { .map_err(Error::KeyError)?; self.0 .borrow_mut() - .insert(key.to_string(), types::encode(&pred_epochs)); + .insert(key.to_string(), encode(&pred_epochs)); } // Address gen { @@ -377,19 +347,15 @@ impl DB for MockDB { .push(&"address_gen".to_owned()) .map_err(Error::KeyError)?; let value = &address_gen; - self.0 - .borrow_mut() - .insert(key.to_string(), types::encode(value)); + self.0.borrow_mut().insert(key.to_string(), encode(value)); } self.0 .borrow_mut() - .insert("height".to_owned(), types::encode(&height)); + .insert("height".to_owned(), encode(&height)); // Block results { let results_path = format!("results/{}", height.raw()); - self.0 - .borrow_mut() - .insert(results_path, types::encode(&results)); + self.0.borrow_mut().insert(results_path, encode(&results)); } Ok(()) } @@ -430,7 +396,7 @@ impl DB for MockDB { let bytes = self.0.borrow().get(&root_key.to_string()).cloned(); match bytes { Some(b) => { - let root = types::decode(b).map_err(Error::CodingError)?; + let root = decode(b).map_err(Error::CodingError)?; merkle_tree_stores.set_root(st, root); } None => return Ok(None), diff --git a/crates/storage/src/tx_queue.rs b/crates/storage/src/tx_queue.rs index a183ec649f..3d5d9c7d87 100644 --- 
a/crates/storage/src/tx_queue.rs +++ b/crates/storage/src/tx_queue.rs @@ -1,5 +1,5 @@ use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::ethereum_events::EthereumEvent; +use namada_core::ethereum_events::EthereumEvent; use namada_gas::Gas; use namada_tx::Tx; diff --git a/crates/test_utils/src/tx_data.rs b/crates/test_utils/src/tx_data.rs index a985479237..945873b3ad 100644 --- a/crates/test_utils/src/tx_data.rs +++ b/crates/test_utils/src/tx_data.rs @@ -2,9 +2,9 @@ //! Namada transaction. use borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::address::Address; -use namada_core::types::storage; -use namada_core::types::token::Amount; +use namada_core::address::Address; +use namada_core::storage; +use namada_core::token::Amount; /// Represents an arbitrary write to storage at the specified key. This should /// be used alongside the test `tx_write.wasm`. diff --git a/crates/tests/src/e2e/eth_bridge_tests.rs b/crates/tests/src/e2e/eth_bridge_tests.rs index b1ea2636f5..9d7eee8ed8 100644 --- a/crates/tests/src/e2e/eth_bridge_tests.rs +++ b/crates/tests/src/e2e/eth_bridge_tests.rs @@ -7,25 +7,24 @@ use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use color_eyre::eyre::{eyre, Result}; use expectrl::ControlCode; +use namada::control_flow::time::{Constant, Sleep}; +use namada::core::address::wnam; +use namada::core::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; +use namada::core::ethereum_events::EthAddress; +use namada::core::storage::{self, Epoch}; +use namada::core::{address, token}; use namada::eth_bridge::oracle; use namada::eth_bridge::storage::vote_tallies; use namada::ledger::eth_bridge::{ ContractVersion, Contracts, EthereumBridgeParams, MinimumConfirmations, UpgradeableContract, }; -use namada::types::address::wnam; -use namada::types::control_flow::time::{Constant, Sleep}; -use namada::types::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; -use namada::types::ethereum_events::EthAddress; 
-use namada::types::storage::{self, Epoch}; -use namada::types::{address, token}; use namada_apps::config::ethereum_bridge; -use namada_core::ledger::eth_bridge::ADDRESS as BRIDGE_ADDRESS; -use namada_core::types::address::Address; -use namada_core::types::ethereum_events::{ +use namada_core::address::Address; +use namada_core::ethereum_events::{ EthereumEvent, TransferToEthereum, TransferToNamada, }; -use namada_core::types::token::Amount; +use namada_core::token::Amount; use namada_test_utils::tx_data::TxWriteData; use namada_test_utils::TestWasms; use tokio::time::{Duration, Instant}; @@ -49,7 +48,7 @@ use crate::e2e::setup::{Bin, Who}; use crate::strings::{ LEDGER_STARTED, TX_ACCEPTED, TX_APPLIED_SUCCESS, VALIDATOR_NODE, }; -use crate::{run, run_as}; +use crate::{run, run_as, ADDRESS as BRIDGE_ADDRESS}; /// # Examples /// @@ -279,10 +278,9 @@ async fn test_roundtrip_eth_transfer() -> Result<()> { /// In this test, we check the following: /// 1. We can successfully add transfers to the bridge pool. /// 2. We can query the bridge pool and it is non-empty. -/// 3. We request a proof of inclusion of the transfer into the -/// bridge pool. -/// 4. We submit an Ethereum event indicating that the transfer -/// has been relayed. +/// 3. We request a proof of inclusion of the transfer into the bridge pool. +/// 4. We submit an Ethereum event indicating that the transfer has been +/// relayed. /// 5. We check that the event is removed from the bridge pool. 
#[tokio::test] async fn test_bridge_pool_e2e() { diff --git a/crates/tests/src/e2e/eth_bridge_tests/helpers.rs b/crates/tests/src/e2e/eth_bridge_tests/helpers.rs index 70e27b02ca..813cad5e99 100644 --- a/crates/tests/src/e2e/eth_bridge_tests/helpers.rs +++ b/crates/tests/src/e2e/eth_bridge_tests/helpers.rs @@ -7,15 +7,15 @@ use data_encoding::HEXLOWER; use eyre::{eyre, Context, Result}; use hyper::client::HttpConnector; use hyper::{Body, Client, Method, Request, StatusCode}; +use namada::core::address::{wnam, Address}; +use namada::core::ethereum_events::{ + EthAddress, EthereumEvent, TransferToNamada, Uint, +}; use namada::ledger::eth_bridge::{ wrapped_erc20s, ContractVersion, Contracts, EthereumBridgeParams, MinimumConfirmations, UpgradeableContract, }; use namada::token; -use namada::types::address::{wnam, Address}; -use namada::types::ethereum_events::{ - EthAddress, EthereumEvent, TransferToNamada, Uint, -}; use namada_apps::config::ethereum_bridge; use crate::e2e::helpers::{ diff --git a/crates/tests/src/e2e/helpers.rs b/crates/tests/src/e2e/helpers.rs index 8248057d59..6381bdb753 100644 --- a/crates/tests/src/e2e/helpers.rs +++ b/crates/tests/src/e2e/helpers.rs @@ -15,18 +15,18 @@ use color_eyre::owo_colors::OwoColorize; use data_encoding::HEXLOWER; use escargot::CargoBuild; use eyre::eyre; +use namada::core::address::Address; +use namada::core::key::*; +use namada::core::storage::Epoch; use namada::ledger::queries::{Rpc, RPC}; use namada::tendermint_rpc::HttpClient; use namada::token; -use namada::types::address::Address; -use namada::types::key::*; -use namada::types::storage::Epoch; use namada_apps::cli::context::ENV_VAR_CHAIN_ID; use namada_apps::config::genesis::chain::DeriveEstablishedAddress; use namada_apps::config::genesis::templates; use namada_apps::config::utils::convert_tm_addr_to_socket_addr; use namada_apps::config::{Config, TendermintMode}; -use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; +use 
namada_core::token::NATIVE_MAX_DECIMAL_PLACES; use namada_sdk::wallet::fs::FsWalletUtils; use namada_sdk::wallet::Wallet; use toml::Value; diff --git a/crates/tests/src/e2e/ibc_tests.rs b/crates/tests/src/e2e/ibc_tests.rs index 40968383a0..46fe94802b 100644 --- a/crates/tests/src/e2e/ibc_tests.rs +++ b/crates/tests/src/e2e/ibc_tests.rs @@ -9,7 +9,6 @@ //! To keep the temporary files created by a test, use env var //! `NAMADA_E2E_KEEP_TEMP=true`. -use core::convert::TryFrom; use core::str::FromStr; use core::time::Duration; use std::collections::{BTreeSet, HashMap}; @@ -17,6 +16,10 @@ use std::path::{Path, PathBuf}; use color_eyre::eyre::Result; use eyre::eyre; +use namada::core::address::{Address, InternalAddress}; +use namada::core::key::PublicKey; +use namada::core::storage::{BlockHeight, Epoch, Key}; +use namada::core::token::Amount; use namada::governance::cli::onchain::PgfFunding; use namada::governance::storage::proposal::{PGFIbcTarget, PGFTarget}; use namada::ibc::apps::transfer::types::VERSION as ICS20_VERSION; @@ -63,10 +66,6 @@ use namada::ledger::storage::ics23_specs::ibc_proof_specs; use namada::state::Sha256Hasher; use namada::tendermint::abci::Event as AbciEvent; use namada::tendermint::block::Height as TmHeight; -use namada::types::address::{Address, InternalAddress}; -use namada::types::key::PublicKey; -use namada::types::storage::{BlockHeight, Epoch, Key}; -use namada::types::token::Amount; use namada_apps::cli::context::ENV_VAR_CHAIN_ID; use namada_apps::client::rpc::{ query_pos_parameters, query_storage_value, query_storage_value_bytes, @@ -78,7 +77,7 @@ use namada_apps::config::{ethereum_bridge, TendermintMode}; use namada_apps::facade::tendermint::block::Header as TmHeader; use namada_apps::facade::tendermint::merkle::proof::ProofOps as TmProof; use namada_apps::facade::tendermint_rpc::{Client, HttpClient, Url}; -use namada_core::types::string_encoding::StringEncoded; +use namada_core::string_encoding::StringEncoded; use 
namada_sdk::masp::fs::FsShieldedUtils; use prost::Message; use setup::constants::*; @@ -167,6 +166,7 @@ fn run_ledger_ibc() -> Result<()> { // The balance should not be changed check_balances_after_back(&port_id_b, &channel_id_b, &test_a, &test_b)?; + // Shielded transfer 10 BTC from Chain A to Chain B shielded_transfer( &test_a, &test_b, @@ -179,6 +179,25 @@ fn run_ledger_ibc() -> Result<()> { )?; check_shielded_balances(&port_id_b, &channel_id_b, &test_a, &test_b)?; + // Shielded transfer 5 BTC back from Chain B to the origin-specific account + // on Chain A + shielded_transfer_back( + &test_a, + &test_b, + &client_id_a, + &client_id_b, + &port_id_a, + &channel_id_a, + &port_id_b, + &channel_id_b, + )?; + check_shielded_balances_after_back( + &port_id_b, + &channel_id_b, + &test_a, + &test_b, + )?; + // Skip tests for closing a channel and timeout_on_close since the transfer // channel cannot be closed @@ -1367,6 +1386,102 @@ fn shielded_transfer( Ok(()) } +#[allow(clippy::too_many_arguments)] +fn shielded_transfer_back( + test_a: &Test, + test_b: &Test, + client_id_a: &ClientId, + client_id_b: &ClientId, + port_id_a: &PortId, + channel_id_a: &ChannelId, + port_id_b: &PortId, + channel_id_b: &ChannelId, +) -> Result<()> { + // Get masp proof for the following IBC transfer from the destination chain + let rpc_a = get_actor_rpc(test_a, Who::Validator(0)); + // It will send 5 BTC from Chain B to PA(A) on Chain A + // Chain A will receive Chain A's BTC + std::env::set_var(ENV_VAR_CHAIN_ID, test_a.net.chain_id.to_string()); + let output_folder = test_b.test_dir.path().to_string_lossy(); + // PA(A) on Chain A will receive BTC on chain A + let token_addr = find_address(test_a, BTC)?; + let ibc_token = format!("{port_id_b}/{channel_id_b}/{token_addr}"); + let args = [ + "ibc-gen-shielded", + "--output-folder-path", + &output_folder, + "--target", + AA_PAYMENT_ADDRESS, + "--token", + &ibc_token, + "--amount", + "5", + "--port-id", + port_id_a.as_ref(), + "--channel-id", + 
channel_id_a.as_ref(), + "--node", + &rpc_a, + ]; + let mut client = run!(test_a, Bin::Client, args, Some(120))?; + let file_path = get_shielded_transfer_path(&mut client)?; + client.assert_success(); + + // Send a token from SP(B) on Chain B to PA(A) on Chain A + let height = transfer( + test_b, + B_SPENDING_KEY, + AA_PAYMENT_ADDRESS, + &ibc_token, + "5", + ALBERT_KEY, + port_id_b, + channel_id_b, + Some(&file_path.to_string_lossy()), + None, + None, + false, + )?; + let events = get_events(test_b, height)?; + let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; + + let height_b = query_height(test_b)?; + let proof_commitment_on_b = + get_commitment_proof(test_b, &packet, height_b)?; + // the message member names are confusing, "_a" means the source + let msg = MsgRecvPacket { + packet, + proof_commitment_on_a: proof_commitment_on_b, + proof_height_on_a: height_b, + signer: signer(), + }; + // Update the client state of Chain B on Chain A + update_client_with_height(test_b, test_a, client_id_a, height_b)?; + // Receive the token on Chain A + let height = submit_ibc_tx(test_a, msg, ALBERT, ALBERT_KEY, false)?; + let events = get_events(test_a, height)?; + let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; + let ack = get_ack_from_events(&events).ok_or(eyre!(TX_FAILED))?; + + // get the proof on Chain A + let height_a = query_height(test_a)?; + let proof_acked_on_a = get_ack_proof(test_a, &packet, height_a)?; + // the message member names are confusing, "_b" means the destination + let msg = MsgAcknowledgement { + packet, + acknowledgement: ack.try_into().expect("invalid ack"), + proof_acked_on_b: proof_acked_on_a, + proof_height_on_b: height_a, + signer: signer(), + }; + // Update the client state of Chain A on Chain B + update_client_with_height(test_a, test_b, client_id_b, height_a)?; + // Acknowledge on Chain B + submit_ibc_tx(test_b, msg, ALBERT, ALBERT_KEY, false)?; + + Ok(()) +} + fn get_shielded_transfer_path(client: 
&mut NamadaCmd) -> Result { let (_unread, matched) = client.exp_regex("Output IBC shielded transfer .*")?; @@ -1935,6 +2050,72 @@ fn check_shielded_balances( Ok(()) } +/// Check balances after IBC shielded transfer after transfer back +fn check_shielded_balances_after_back( + src_port_id: &PortId, + src_channel_id: &ChannelId, + test_a: &Test, + test_b: &Test, +) -> Result<()> { + std::env::set_var(ENV_VAR_CHAIN_ID, test_a.net.chain_id.to_string()); + let token_addr = find_address(test_a, BTC)?.to_string(); + // Check the balance on Chain B + std::env::set_var(ENV_VAR_CHAIN_ID, test_b.net.chain_id.to_string()); + let rpc_b = get_actor_rpc(test_b, Who::Validator(0)); + let tx_args = vec![ + "shielded-sync", + "--viewing-keys", + AB_VIEWING_KEY, + "--node", + &rpc_b, + ]; + let mut client = run!(test_b, Bin::Client, tx_args, Some(120))?; + client.assert_success(); + let ibc_denom = format!("{src_port_id}/{src_channel_id}/btc"); + let query_args = vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + &token_addr, + "--no-conversions", + "--node", + &rpc_b, + ]; + let expected = format!("{ibc_denom}: 5"); + let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; + client.exp_string(&expected)?; + client.assert_success(); + + // Check the balance on Chain A + std::env::set_var(ENV_VAR_CHAIN_ID, test_a.net.chain_id.to_string()); + let rpc_a = get_actor_rpc(test_a, Who::Validator(0)); + let tx_args = vec![ + "shielded-sync", + "--viewing-keys", + AA_VIEWING_KEY, + "--node", + &rpc_a, + ]; + let mut client = run!(test_a, Bin::Client, tx_args, Some(120))?; + client.assert_success(); + let query_args = vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + &token_addr, + "--no-conversions", + "--node", + &rpc_a, + ]; + let mut client = run!(test_a, Bin::Client, query_args, Some(40))?; + client.exp_string("btc: 5")?; + client.assert_success(); + + Ok(()) +} + fn check_funded_balances( dest_port_id: &PortId, dest_channel_id: &ChannelId, diff 
--git a/crates/tests/src/e2e/ledger_tests.rs b/crates/tests/src/e2e/ledger_tests.rs index 4e1cb2d047..2c5b6fad19 100644 --- a/crates/tests/src/e2e/ledger_tests.rs +++ b/crates/tests/src/e2e/ledger_tests.rs @@ -11,6 +11,7 @@ #![allow(clippy::type_complexity)] use std::collections::HashMap; +use std::fmt::Display; use std::path::PathBuf; use std::process::Command; use std::str::FromStr; @@ -21,17 +22,17 @@ use borsh_ext::BorshSerializeExt; use color_eyre::eyre::Result; use color_eyre::owo_colors::OwoColorize; use data_encoding::HEXLOWER; +use namada::core::address::Address; +use namada::core::storage::Epoch; use namada::governance::cli::onchain::{PgfFunding, StewardsUpdate}; use namada::governance::storage::proposal::{PGFInternalTarget, PGFTarget}; use namada::token; -use namada::types::address::Address; -use namada::types::storage::Epoch; use namada_apps::cli::context::ENV_VAR_CHAIN_ID; use namada_apps::config::ethereum_bridge; use namada_apps::config::utils::convert_tm_addr_to_socket_addr; use namada_apps::facade::tendermint_config::net::Address as TendermintAddress; -use namada_core::types::chain::ChainId; -use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; +use namada_core::chain::ChainId; +use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; use namada_sdk::governance::pgf::cli::steward::Commission; use namada_sdk::masp::fs::FsShieldedUtils; use namada_test_utils::TestWasms; @@ -62,7 +63,7 @@ use crate::strings::{ }; use crate::{run, run_as}; -const ENV_VAR_NAMADA_ADD_PEER: &str = "NAMADA_ADD_PEER"; +const ENV_VAR_NAMADA_SEED_NODES: &str = "NAMADA_SEED_NODES"; fn start_namada_ledger_node( test: &Test, @@ -210,7 +211,6 @@ fn test_node_connectivity_and_consensus() -> Result<()> { let _bg_validator_1 = validator_1.background(); let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); - let validator_1_rpc = get_actor_rpc(&test, Who::Validator(1)); let non_validator_rpc = get_actor_rpc(&test, Who::NonValidator); // Find the block height on the validator 
@@ -219,14 +219,12 @@ fn test_node_connectivity_and_consensus() -> Result<()> { // Wait for the non-validator to be synced to at least the same height wait_for_block_height(&test, &non_validator_rpc, after_tx_height, 10)?; - let query_balance_args = |ledger_rpc| { - vec![ - "balance", "--owner", ALBERT, "--token", NAM, "--node", ledger_rpc, - ] - }; - for ledger_rpc in &[validator_0_rpc, validator_1_rpc, non_validator_rpc] { + let query_balance_args = ["balance", "--owner", ALBERT, "--token", NAM]; + for who in + [Who::Validator(0), Who::Validator(1), Who::NonValidator].into_iter() + { let mut client = - run!(test, Bin::Client, query_balance_args(ledger_rpc), Some(40))?; + run_as!(test, who, Bin::Client, query_balance_args, Some(40))?; client.exp_string("nam: 2000010.1")?; client.assert_success(); } @@ -346,8 +344,8 @@ fn run_ledger_load_state_and_reset() -> Result<()> { } /// In this test we -/// 1. Run the ledger node until a pre-configured height, -/// at which point it should suspend. +/// 1. Run the ledger node until a pre-configured height, at which point it +/// should suspend. /// 2. Check that we can still query the ledger. /// 3. Check that we can shutdown the ledger normally afterwards. 
#[test] @@ -538,7 +536,7 @@ fn ledger_txs_and_queries() -> Result<()> { vec![ "init-account", "--public-keys", - // Value obtained from `namada::types::key::ed25519::tests::gen_keypair` + // Value obtained from `namada::core::key::ed25519::tests::gen_keypair` "tpknam1qpqfzxu3gt05jx2mvg82f4anf90psqerkwqhjey4zlqv0qfgwuvkzt5jhkp", "--threshold", "1", @@ -570,7 +568,7 @@ fn ledger_txs_and_queries() -> Result<()> { let tx_args = if dry_run && tx_args[0] == "tx" { continue; } else if dry_run { - vec![tx_args.clone(), vec!["--dry-run"]].concat() + [tx_args.clone(), vec!["--dry-run"]].concat() } else { tx_args.clone() }; @@ -1506,9 +1504,11 @@ fn test_bond_queries() -> Result<()> { client.exp_string( "All bonds total active: 100188.000000\r All bonds total: 100188.000000\r +All bonds total slashed: 0.000000\r All unbonds total active: 412.000000\r All unbonds total: 412.000000\r -All unbonds total withdrawable: 412.000000\r", +All unbonds total withdrawable: 412.000000\r +All unbonds total slashed: 0.000000\r", )?; client.assert_success(); @@ -1605,8 +1605,8 @@ fn pos_init_validator() -> Result<()> { client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); - // 3. Submit a delegation to the new validator - // First, transfer some tokens to the validator's key for fees: + // 3. 
Submit a delegation to the new validator First, transfer some tokens + // to the validator's key for fees: let tx_args = vec![ "transfer", "--source", @@ -2624,7 +2624,7 @@ fn proposal_offline() -> Result<()> { ); let valid_proposal_json_path = test.test_dir.path().join("valid_proposal.json"); - write_json_file(valid_proposal_json_path.as_path(), &valid_proposal_json); + write_json_file(valid_proposal_json_path.as_path(), valid_proposal_json); let mut epoch = get_epoch(&test, &validator_one_rpc).unwrap(); while epoch.0 <= 3 { @@ -2728,7 +2728,7 @@ fn double_signing_gets_slashed() -> Result<()> { use std::net::SocketAddr; use std::str::FromStr; - use namada::types::key::{self, ed25519, SigScheme}; + use namada::core::key::{self, ed25519, SigScheme}; use namada_apps::client; use namada_apps::config::Config; @@ -3025,13 +3025,12 @@ fn double_signing_gets_slashed() -> Result<()> { /// In this test we: /// 1. Run the ledger node -/// 2. For some transactions that need signature authorization: -/// 2a. Generate a new key for an implicit account. -/// 2b. Send some funds to the implicit account. -/// 2c. Submit the tx with the implicit account as the source, that -/// requires that the account has revealed its PK. This should be done -/// by the client automatically. -/// 2d. Submit same tx again, this time the client shouldn't reveal again. +/// 2. For some transactions that need signature authorization: 2a. Generate a +/// new key for an implicit account. 2b. Send some funds to the implicit +/// account. 2c. Submit the tx with the implicit account as the source, that +/// requires that the account has revealed its PK. This should be done by the +/// client automatically. 2d. Submit same tx again, this time the client +/// shouldn't reveal again. #[test] fn implicit_account_reveal_pk() -> Result<()> { let test = setup::single_node_net()?; @@ -4014,8 +4013,8 @@ fn proposal_change_shielded_reward() -> Result<()> { /// Test sync with a chain. 
/// /// The chain ID must be set via `NAMADA_CHAIN_ID` env var. -/// Additionally, `NAMADA_ADD_PEER` maybe be specified with a string that must -/// be parsable into `TendermintAddress`. +/// Additionally, `NAMADA_SEED_NODES` maybe be specified with a comma-separated +/// list of addresses that must be parsable into `TendermintAddress`. /// /// To run this test use `--ignored`. #[test] @@ -4053,21 +4052,32 @@ fn test_sync_chain() -> Result<()> { join_network.exp_string("Successfully configured for chain")?; join_network.assert_success(); - // Add peer if any given - if let Ok(add_peer) = std::env::var(ENV_VAR_NAMADA_ADD_PEER) { + if cfg!(debug_assertions) { + let res: Result, _> = + deserialize_comma_separated_list( + "tcp://9202be72cfe612af24b43f49f53096fc5512cd7f@194.163.172.\ + 168:26656,tcp://0edfd7e6a1a172864ddb76a10ea77a8bb242759a@65.\ + 21.194.46:36656", + ); + debug_assert!(res.is_ok(), "Expected Ok, got {res:#?}"); + } + // Add seed nodes if any given + if let Ok(seed_nodes) = std::env::var(ENV_VAR_NAMADA_SEED_NODES) { let mut config = namada_apps::config::Config::load( base_dir, &test.net.chain_id, None, ); - config.ledger.cometbft.p2p.persistent_peers.push( - TendermintAddress::from_str(&add_peer).unwrap_or_else(|_| { - panic!( - "Invalid `{ENV_VAR_NAMADA_ADD_PEER}` value. Must be a \ - valid `TendermintAddress`." - ) - }), - ); + let seed_nodes: Vec = + deserialize_comma_separated_list(&seed_nodes).unwrap_or_else( + |_| { + panic!( + "Invalid `{ENV_VAR_NAMADA_SEED_NODES}` value. Must be \ + a valid `TendermintAddress`." + ) + }, + ); + config.ledger.cometbft.p2p.seeds.extend(seed_nodes); config.write(base_dir, &test.net.chain_id, true).unwrap(); } @@ -4096,3 +4106,32 @@ fn test_sync_chain() -> Result<()> { Ok(()) } + +/// Deserialize a comma separated list of types that impl `FromStr` as a `Vec` +/// from a string. Same as `tendermint-config/src/config.rs` list +/// deserialization. 
+fn deserialize_comma_separated_list( + list: &str, +) -> serde_json::Result> +where + T: FromStr, + E: Display, +{ + use serde::de::Error; + + let mut result = vec![]; + + if list.is_empty() { + return Ok(result); + } + + for item in list.split(',') { + result.push( + item.parse() + .map_err(|e| serde_json::Error::custom(format!("{e}"))) + .unwrap(), + ); + } + + Ok(result) +} diff --git a/crates/tests/src/e2e/multitoken_tests/helpers.rs b/crates/tests/src/e2e/multitoken_tests/helpers.rs index 3e65116229..91edb6cae1 100644 --- a/crates/tests/src/e2e/multitoken_tests/helpers.rs +++ b/crates/tests/src/e2e/multitoken_tests/helpers.rs @@ -5,8 +5,8 @@ use std::str::FromStr; use borsh::BorshSerialize; use color_eyre::eyre::Result; use eyre::Context; -use namada_core::types::address::Address; -use namada_core::types::{storage, token}; +use namada_core::address::Address; +use namada_core::{storage, token}; use namada_test_utils::tx_data::TxWriteData; use namada_test_utils::TestWasms; use namada_tx_prelude::storage::KeySeg; @@ -41,7 +41,7 @@ pub fn init_multitoken_vp(test: &Test, rpc_addr: &str) -> Result { ARBITRARY_SIGNER, "--public-key", // Value obtained from - // `namada::types::key::ed25519::tests::gen_keypair` + // `namada::core::key::ed25519::tests::gen_keypair` "001be519a321e29020fa3cbfbfd01bd5e92db134305609270b71dace25b5a21168", "--alias", multitoken_alias, diff --git a/crates/tests/src/e2e/setup.rs b/crates/tests/src/e2e/setup.rs index 92df5b9604..1e73e52c3c 100644 --- a/crates/tests/src/e2e/setup.rs +++ b/crates/tests/src/e2e/setup.rs @@ -18,7 +18,7 @@ use expectrl::stream::log::LogStream; use expectrl::{ControlCode, Eof, WaitStatus}; use eyre::eyre; use itertools::{Either, Itertools}; -use namada::types::chain::ChainId; +use namada::core::chain::ChainId; use namada_apps::cli::context::ENV_VAR_CHAIN_ID; use namada_apps::client::utils::{ self, validator_pre_genesis_dir, validator_pre_genesis_txs_file, @@ -27,16 +27,15 @@ use 
namada_apps::config::genesis::utils::read_toml; use namada_apps::config::genesis::{templates, transactions, GenesisAddress}; use namada_apps::config::{ethereum_bridge, genesis, Config}; use namada_apps::{config, wallet}; -use namada_core::types::address::Address; -use namada_core::types::key::{RefTo, SchemeType}; -use namada_core::types::string_encoding::StringEncoded; -use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; +use namada_core::address::Address; +use namada_core::key::{RefTo, SchemeType}; +use namada_core::string_encoding::StringEncoded; +use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; use namada_sdk::wallet::alias::Alias; use namada_tx_prelude::token; use once_cell::sync::Lazy; use rand::rngs::OsRng; use rand::Rng; -use serde_json; use tempfile::{tempdir, tempdir_in, TempDir}; use crate::e2e::helpers::{generate_bin_command, make_hermes_config}; @@ -369,7 +368,7 @@ pub fn network( { let base_dir = test_dir.path(); let src_path = - wallet::wallet_file(&templates_dir.join("src").join("pre-genesis")); + wallet::wallet_file(templates_dir.join("src").join("pre-genesis")); let dest_dir = base_dir.join("pre-genesis"); let dest_path = wallet::wallet_file(&dest_dir); println!( diff --git a/crates/tests/src/integration/masp.rs b/crates/tests/src/integration/masp.rs index 040857603d..8f3c6a5ac8 100644 --- a/crates/tests/src/integration/masp.rs +++ b/crates/tests/src/integration/masp.rs @@ -9,7 +9,7 @@ use namada_apps::node::ledger::shell::testing::client::run; use namada_apps::node::ledger::shell::testing::node::NodeResults; use namada_apps::node::ledger::shell::testing::utils::{Bin, CapturedOutput}; use namada_apps::wallet::defaults::christel_keypair; -use namada_core::types::dec::Dec; +use namada_core::dec::Dec; use namada_sdk::masp::fs::FsShieldedUtils; use test_log::test; @@ -1376,7 +1376,7 @@ fn masp_txs_and_queries() -> Result<()> { vec!["shielded-sync", "--node", validator_one_rpc], )?; let tx_args = if dry_run && tx_args[0] == "transfer" { - 
vec![tx_args.clone(), vec!["--dry-run"]].concat() + [tx_args.clone(), vec!["--dry-run"]].concat() } else { tx_args.clone() }; @@ -1963,8 +1963,8 @@ fn multiple_unfetched_txs_same_block() -> Result<()> { .shell .lock() .unwrap() - .wl_storage - .storage + .state + .in_mem() .native_token .clone(); let mut txs = vec![]; @@ -2127,9 +2127,18 @@ fn dynamic_assets() -> Result<()> { let tokens = { // Only distribute rewards for NAM tokens - let storage = &mut node.shell.lock().unwrap().wl_storage.storage; - let tokens = storage.conversion_state.tokens.clone(); - storage.conversion_state.tokens.retain(|k, _v| *k == nam); + let state = &mut node.shell.lock().unwrap().state; + let tokens = state.in_mem().conversion_state.tokens.clone(); + state + .in_mem_mut() + .conversion_state + .tokens + .insert(btc.clone(), tokens[&btc].clone()); + state + .in_mem_mut() + .conversion_state + .tokens + .retain(|k, _v| *k == nam); tokens }; // add necessary viewing keys to shielded context @@ -2215,8 +2224,9 @@ fn dynamic_assets() -> Result<()> { { // Start decoding and distributing shielded rewards for BTC in next // epoch - let storage = &mut node.shell.lock().unwrap().wl_storage.storage; - storage + let state = &mut node.shell.lock().unwrap().state; + state + .in_mem_mut() .conversion_state .tokens .insert(btc.clone(), tokens[&btc].clone()); @@ -2386,7 +2396,7 @@ fn dynamic_assets() -> Result<()> { { // Stop distributing shielded rewards for NAM in next epoch - let storage = &mut node.shell.lock().unwrap().wl_storage; + let storage = &mut node.shell.lock().unwrap().state; storage .write( &token::storage_key::masp_max_reward_rate_key(&tokens[&nam]), @@ -2445,8 +2455,8 @@ fn dynamic_assets() -> Result<()> { { // Stop decoding and distributing shielded rewards for BTC in next epoch - let storage = &mut node.shell.lock().unwrap().wl_storage.storage; - storage.conversion_state.tokens.remove(&btc); + let state = &mut node.shell.lock().unwrap().state; + 
state.in_mem_mut().conversion_state.tokens.remove(&btc); } // Wait till epoch boundary @@ -2546,7 +2556,7 @@ fn dynamic_assets() -> Result<()> { { // Start distributing shielded rewards for NAM in next epoch - let storage = &mut node.shell.lock().unwrap().wl_storage; + let storage = &mut node.shell.lock().unwrap().state; storage .write( &token::storage_key::masp_max_reward_rate_key(&tokens[&nam]), diff --git a/crates/tests/src/integration/setup.rs b/crates/tests/src/integration/setup.rs index f8c2b0c066..e8aa6b4ee7 100644 --- a/crates/tests/src/integration/setup.rs +++ b/crates/tests/src/integration/setup.rs @@ -5,8 +5,8 @@ use std::str::FromStr; use std::sync::{Arc, Mutex}; use color_eyre::eyre::{eyre, Result}; -use namada::types::dec::Dec; -use namada::types::token; +use namada::core::dec::Dec; +use namada::token; use namada_apps::cli::args; use namada_apps::client::utils::PRE_GENESIS_DIR; use namada_apps::config; @@ -23,7 +23,7 @@ use namada_apps::node::ledger::shell::testing::node::{ use namada_apps::node::ledger::shell::testing::utils::TestDir; use namada_apps::node::ledger::shell::Shell; use namada_apps::wallet::pre_genesis; -use namada_core::types::chain::ChainIdPrefix; +use namada_core::chain::ChainIdPrefix; use namada_sdk::wallet::alias::Alias; use crate::e2e::setup::{copy_wasm_to_chain_dir, SINGLE_NODE_NET_GENESIS}; @@ -50,7 +50,7 @@ pub fn initialize_genesis() -> Result<(MockNode, MockServicesController)> { let mut templates = templates::All::read_toml_files(&template_dir) .expect("Missing genesis files"); for (_, config) in templates.tokens.token.iter_mut() { - config.masp_params = Some(token::MaspParams { + config.masp_params = Some(token::ShieldedParams { max_reward_rate: Dec::from_str("0.1").unwrap(), kp_gain_nom: Dec::from_str("0.1").unwrap(), kd_gain_nom: Dec::from_str("0.1").unwrap(), @@ -243,7 +243,7 @@ fn create_node( .init_chain(init_req, 1) .map_err(|e| eyre!("Failed to initialize ledger: {:?}", e))?; // set the height of the first block 
(should be 1) - locked.wl_storage.storage.block.height = 1.into(); + locked.state.in_mem_mut().block.height = 1.into(); locked.commit(); } diff --git a/crates/tests/src/native_vp/eth_bridge_pool.rs b/crates/tests/src/native_vp/eth_bridge_pool.rs index 2f0109c73f..6a2505b171 100644 --- a/crates/tests/src/native_vp/eth_bridge_pool.rs +++ b/crates/tests/src/native_vp/eth_bridge_pool.rs @@ -1,22 +1,25 @@ #[cfg(test)] mod test_bridge_pool_vp { + use std::cell::RefCell; use std::path::PathBuf; use borsh::BorshDeserialize; use borsh_ext::BorshSerializeExt; + use namada::core::address::testing::{nam, wnam}; + use namada::core::chain::ChainId; + use namada::core::eth_bridge_pool::{ + GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, + }; + use namada::core::ethereum_events::EthAddress; + use namada::core::key::{common, ed25519, SecretKey}; + use namada::core::token::Amount; use namada::eth_bridge::storage::bridge_pool::BRIDGE_POOL_ADDRESS; + use namada::gas::VpGasMeter; use namada::ledger::native_vp::ethereum_bridge::bridge_pool_vp::BridgePoolVp; use namada::tx::Tx; - use namada::types::address::{nam, wnam}; - use namada::types::chain::ChainId; - use namada::types::eth_bridge_pool::{ - GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, - }; - use namada::types::ethereum_events::EthAddress; - use namada::types::key::{common, ed25519, SecretKey}; - use namada::types::token::Amount; use namada_apps::wallet::defaults::{albert_address, bertha_address}; use namada_apps::wasm_loader; + use namada_core::validity_predicate::VpSentinel; use namada_sdk::eth_bridge::{ wrapped_erc20s, Contracts, Erc20WhitelistEntry, EthereumBridgeParams, UpgradeableContract, @@ -35,7 +38,7 @@ mod test_bridge_pool_vp { /// A signing keypair for good old Bertha. 
fn bertha_keypair() -> common::SecretKey { // generated from - // [`namada::types::key::ed25519::gen_keypair`] + // [`namada::core::key::ed25519::gen_keypair`] let bytes = [ 240, 3, 224, 69, 201, 148, 60, 53, 112, 79, 80, 107, 101, 127, 186, 6, 176, 162, 113, 224, 62, 8, 183, 187, 124, 234, 244, 251, 92, 36, @@ -82,7 +85,7 @@ mod test_bridge_pool_vp { }, }; // initialize Ethereum bridge storage - config.init_storage(&mut env.wl_storage); + config.init_storage(&mut env.state); // initialize Bertha's account env.spawn_accounts([&albert_address(), &bertha_address(), &nam()]); // enrich Albert @@ -111,9 +114,13 @@ mod test_bridge_pool_vp { tx_host_env::set(env); let mut tx_env = tx_host_env::take(); tx_env.execute_tx().expect("Test failed."); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &tx_env.gas_meter.borrow(), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, BRIDGE_POOL_ADDRESS); vp_env - .validate_tx(|ctx| BridgePoolVp { ctx }) + .validate_tx(&gas_meter, &sentinel, |ctx| BridgePoolVp { ctx }) .expect("Test failed") } diff --git a/crates/tests/src/native_vp/mod.rs b/crates/tests/src/native_vp/mod.rs index f2928545b0..375e7af7fa 100644 --- a/crates/tests/src/native_vp/mod.rs +++ b/crates/tests/src/native_vp/mod.rs @@ -4,17 +4,17 @@ pub mod pos; use std::cell::RefCell; use std::collections::BTreeSet; +use namada::core::address::Address; +use namada::core::storage; use namada::ledger::gas::VpGasMeter; use namada::ledger::native_vp::{Ctx, NativeVp}; -use namada::state::mockdb::MockDB; -use namada::state::Sha256Hasher; -use namada::types::address::Address; -use namada::types::storage; +use namada::state::testing::TestState; use namada::vm::WasmCacheRwAccess; +use namada_core::validity_predicate::VpSentinel; use crate::tx::TestTxEnv; -type NativeVpCtx<'a> = Ctx<'a, MockDB, Sha256Hasher, WasmCacheRwAccess>; +type NativeVpCtx<'a> = Ctx<'a, TestState, WasmCacheRwAccess>; #[derive(Debug)] pub 
struct TestNativeVpEnv { @@ -44,26 +44,24 @@ impl TestNativeVpEnv { /// Run some transaction code `apply_tx` and validate it with a native VP pub fn validate_tx<'a, T>( &'a self, + gas_meter: &'a RefCell, + sentinel: &'a RefCell, init_native_vp: impl Fn(NativeVpCtx<'a>) -> T, ) -> Result::Error> where T: NativeVp, { - let ctx = Ctx { - iterators: Default::default(), - gas_meter: RefCell::new(VpGasMeter::new_from_tx_meter( - &self.tx_env.gas_meter, - )), - sentinel: Default::default(), - storage: &self.tx_env.wl_storage.storage, - write_log: &self.tx_env.wl_storage.write_log, - tx: &self.tx_env.tx, - tx_index: &self.tx_env.tx_index, - vp_wasm_cache: self.tx_env.vp_wasm_cache.clone(), - address: &self.address, - keys_changed: &self.keys_changed, - verifiers: &self.verifiers, - }; + let ctx = Ctx::new( + &self.address, + &self.tx_env.state, + &self.tx_env.tx, + &self.tx_env.tx_index, + gas_meter, + sentinel, + &self.keys_changed, + &self.verifiers, + self.tx_env.vp_wasm_cache.clone(), + ); let native_vp = init_native_vp(ctx); native_vp.validate_tx( diff --git a/crates/tests/src/native_vp/pos.rs b/crates/tests/src/native_vp/pos.rs index f4a3c6bc5e..a79c5c3d5c 100644 --- a/crates/tests/src/native_vp/pos.rs +++ b/crates/tests/src/native_vp/pos.rs @@ -95,10 +95,10 @@ //! - add slashes //! 
- add rewards +use namada::core::storage::Epoch; use namada::proof_of_stake::parameters::{OwnedPosParams, PosParams}; use namada::proof_of_stake::test_utils::test_init_genesis as init_genesis; use namada::proof_of_stake::types::GenesisValidator; -use namada::types::storage::Epoch; use crate::tx::tx_host_env; @@ -114,7 +114,7 @@ pub fn init_pos( tx_host_env::with(|tx_env| { // Ensure that all the used // addresses exist - let native_token = tx_env.wl_storage.storage.native_token.clone(); + let native_token = tx_env.state.in_mem().native_token.clone(); tx_env.spawn_accounts([&native_token]); for validator in genesis_validators { tx_env.spawn_accounts([&validator.address]); @@ -124,14 +124,14 @@ pub fn init_pos( 1, ) } - tx_env.wl_storage.storage.block.epoch = start_epoch; + tx_env.state.in_mem_mut().block.epoch = start_epoch; // Initialize PoS storage // tx_env - // .storage + // .state // .init_genesis(params, genesis_validators.iter(), start_epoch) // .unwrap(); let params = init_genesis( - &mut tx_env.wl_storage, + &mut tx_env.state, params.clone(), genesis_validators.iter().cloned(), start_epoch, @@ -147,11 +147,14 @@ pub fn init_pos( #[cfg(test)] mod tests { - use namada::ledger::pos::{PosParams, PosVP}; + use std::cell::RefCell; + + use namada::core::address; + use namada::core::key::common::PublicKey; + use namada::gas::VpGasMeter; + use namada::ledger::pos::PosVP; use namada::token; - use namada::types::address; - use namada::types::key::common::PublicKey; - use namada::types::storage::Epoch; + use namada_core::validity_predicate::VpSentinel; use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; use namada_tx_prelude::Address; use proptest::prelude::*; @@ -167,7 +170,6 @@ mod tests { }; use super::*; use crate::native_vp::TestNativeVpEnv; - use crate::tx::tx_host_env; prop_state_machine! 
{ #![proptest_config(Config { @@ -267,7 +269,7 @@ mod tests { if !test_state.is_current_tx_valid { // Clear out the changes tx_host_env::with(|env| { - env.wl_storage.drop_tx(); + env.state.drop_tx(); }); } @@ -281,13 +283,13 @@ mod tests { tx_host_env::with(|env| { // Clear out the changes if !test_state.is_current_tx_valid { - env.wl_storage.drop_tx(); + env.state.drop_tx(); } // Also commit the last transaction(s) changes, if any env.commit_tx_and_block(); - env.wl_storage.storage.block.epoch = - env.wl_storage.storage.block.epoch.next(); + env.state.in_mem_mut().block.epoch = + env.state.in_mem().block.epoch.next(); }); // Starting a new tx @@ -317,7 +319,7 @@ mod tests { // Clear out the invalid changes tx_host_env::with(|env| { - env.wl_storage.drop_tx(); + env.state.drop_tx(); }) } } @@ -435,8 +437,12 @@ mod tests { // Use the tx_env to run PoS VP let tx_env = tx_host_env::take(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &tx_env.gas_meter.borrow(), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(PosVP::new); + let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); // Put the tx_env back before checking the result tx_host_env::set(vp_env.tx_env); @@ -465,8 +471,7 @@ mod tests { if self.invalid_pos_changes.is_empty() && self.invalid_arbitrary_changes.is_empty() { - self.committed_valid_actions - .extend(valid_actions_to_commit.into_iter()); + self.committed_valid_actions.extend(valid_actions_to_commit); } self.invalid_pos_changes = vec![]; self.invalid_arbitrary_changes = vec![]; @@ -569,8 +574,15 @@ mod tests { #[cfg(any(test, feature = "testing"))] pub mod testing { + use std::cell::RefCell; + use derivative::Derivative; use itertools::Either; + use namada::core::dec::Dec; + use namada::core::key::common::PublicKey; + use namada::core::key::RefTo; + use namada::core::storage::Epoch; + use namada::core::{address, 
key}; use namada::ledger::gas::TxGasMeter; use namada::proof_of_stake::epoched::DynEpochOffset; use namada::proof_of_stake::parameters::testing::arb_rate; @@ -582,11 +594,6 @@ pub mod testing { use namada::proof_of_stake::ADDRESS as POS_ADDRESS; use namada::token; use namada::token::{Amount, Change}; - use namada::types::dec::Dec; - use namada::types::key::common::PublicKey; - use namada::types::key::RefTo; - use namada::types::storage::Epoch; - use namada::types::{address, key}; use namada_tx_prelude::{Address, StorageRead, StorageWrite}; use proptest::prelude::*; @@ -858,9 +865,10 @@ pub mod testing { let current_epoch = tx_host_env::with(|env| { // Reset the gas meter on each change, so that we never run // out in this test + let gas_limit = env.gas_meter.borrow().tx_gas_limit; env.gas_meter = - TxGasMeter::new_from_sub_limit(env.gas_meter.tx_gas_limit); - env.wl_storage.storage.block.epoch + RefCell::new(TxGasMeter::new_from_sub_limit(gas_limit)); + env.state.in_mem().block.epoch }); println!("Current epoch {}", current_epoch); diff --git a/crates/tests/src/storage.rs b/crates/tests/src/storage.rs index 154b55de56..3a5c9a5118 100644 --- a/crates/tests/src/storage.rs +++ b/crates/tests/src/storage.rs @@ -3,7 +3,7 @@ use std::rc::Rc; use derivative::Derivative; -use namada::types::storage; +use namada::core::storage; /// A list of changes, which must be applied in the same order to get to the /// current state. 
diff --git a/crates/tests/src/storage_api/collections/lazy_map.rs b/crates/tests/src/storage_api/collections/lazy_map.rs index 0e9309df49..5533fccfc0 100644 --- a/crates/tests/src/storage_api/collections/lazy_map.rs +++ b/crates/tests/src/storage_api/collections/lazy_map.rs @@ -1,11 +1,10 @@ #[cfg(test)] mod tests { use std::collections::BTreeMap; - use std::convert::TryInto; use borsh::{BorshDeserialize, BorshSerialize}; - use namada::types::address::{self, Address}; - use namada::types::storage; + use namada::core::address::{self, Address}; + use namada::core::storage; use namada_tx_prelude::collections::{LazyCollection, LazyMap}; use namada_tx_prelude::storage::KeySeg; use namada_vp_prelude::collection_validation::{self, LazyCollectionExt}; @@ -154,9 +153,7 @@ mod tests { Transition::CommitTx | Transition::CommitTxAndBlock => { let valid_actions_to_commit = std::mem::take(&mut state.valid_transitions); - state - .committed_transitions - .extend(valid_actions_to_commit.into_iter()); + state.committed_transitions.extend(valid_actions_to_commit); } _ => state.valid_transitions.push(transition.clone()), } @@ -243,7 +240,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| env.wl_storage.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/storage_api/collections/lazy_set.rs b/crates/tests/src/storage_api/collections/lazy_set.rs index 2fa04a3e40..4ac16671d9 100644 --- a/crates/tests/src/storage_api/collections/lazy_set.rs +++ b/crates/tests/src/storage_api/collections/lazy_set.rs @@ -1,10 +1,9 @@ #[cfg(test)] mod tests { use std::collections::BTreeSet; - use std::convert::TryInto; - use namada::types::address::{self, Address}; - use namada::types::storage; + use namada::core::address::{self, Address}; + use namada::core::storage; use 
namada_tx_prelude::collections::{LazyCollection, LazySet}; use namada_tx_prelude::storage::KeySeg; use namada_vp_prelude::collection_validation::{self, LazyCollectionExt}; @@ -139,9 +138,7 @@ mod tests { Transition::CommitTx | Transition::CommitTxAndBlock => { let valid_actions_to_commit = std::mem::take(&mut state.valid_transitions); - state - .committed_transitions - .extend(valid_actions_to_commit.into_iter()); + state.committed_transitions.extend(valid_actions_to_commit); } _ => state.valid_transitions.push(transition.clone()), } @@ -232,7 +229,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| env.wl_storage.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/storage_api/collections/lazy_vec.rs b/crates/tests/src/storage_api/collections/lazy_vec.rs index 7a3c3f0b14..a51508dd71 100644 --- a/crates/tests/src/storage_api/collections/lazy_vec.rs +++ b/crates/tests/src/storage_api/collections/lazy_vec.rs @@ -1,10 +1,9 @@ #[cfg(test)] mod tests { - use std::convert::TryInto; use borsh::{BorshDeserialize, BorshSerialize}; - use namada::types::address::{self, Address}; - use namada::types::storage; + use namada::core::address::{self, Address}; + use namada::core::storage; use namada_tx_prelude::collections::{lazy_vec, LazyCollection, LazyVec}; use namada_tx_prelude::storage::KeySeg; use namada_vp_prelude::collection_validation::{self, LazyCollectionExt}; @@ -158,9 +157,7 @@ mod tests { Transition::CommitTx => { let valid_actions_to_commit = std::mem::take(&mut state.valid_transitions); - state - .committed_transitions - .extend(valid_actions_to_commit.into_iter()); + state.committed_transitions.extend(valid_actions_to_commit); } _ => state.valid_transitions.push(transition.clone()), } @@ -237,7 +234,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx 
without committing the block - tx_host_env::with(|env| env.wl_storage.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/storage_api/collections/nested_lazy_map.rs b/crates/tests/src/storage_api/collections/nested_lazy_map.rs index 9a4d487886..2df9ae95bf 100644 --- a/crates/tests/src/storage_api/collections/nested_lazy_map.rs +++ b/crates/tests/src/storage_api/collections/nested_lazy_map.rs @@ -1,11 +1,10 @@ #[cfg(test)] mod tests { use std::collections::BTreeMap; - use std::convert::TryInto; use borsh::{BorshDeserialize, BorshSerialize}; - use namada::types::address::{self, Address}; - use namada::types::storage; + use namada::core::address::{self, Address}; + use namada::core::storage; use namada_tx_prelude::collections::lazy_map::{ NestedMap, NestedSubKey, SubKey, }; @@ -167,9 +166,7 @@ mod tests { Transition::CommitTx | Transition::CommitTxAndBlock => { let valid_actions_to_commit = std::mem::take(&mut state.valid_transitions); - state - .committed_transitions - .extend(valid_actions_to_commit.into_iter()); + state.committed_transitions.extend(valid_actions_to_commit); } _ => state.valid_transitions.push(transition.clone()), } @@ -256,7 +253,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| env.wl_storage.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx()); } Transition::CommitTxAndBlock => { // commit the tx and the block @@ -633,17 +630,13 @@ mod tests { Transition::CommitTx | Transition::CommitTxAndBlock => {} Transition::Insert((key_outer, key_middle, key_inner), value) | Transition::Update((key_outer, key_middle, key_inner), value) => { - let middle = - map.entry(*key_outer).or_insert_with(Default::default); - let inner = - middle.entry(*key_middle).or_insert_with(Default::default); + let middle = map.entry(*key_outer).or_default(); + let inner = 
middle.entry(*key_middle).or_default(); inner.insert(*key_inner, value.clone()); } Transition::Remove((key_outer, key_middle, key_inner)) => { - let middle = - map.entry(*key_outer).or_insert_with(Default::default); - let inner = - middle.entry(*key_middle).or_insert_with(Default::default); + let middle = map.entry(*key_outer).or_default(); + let inner = middle.entry(*key_middle).or_default(); let _popped = inner.remove(key_inner); } } diff --git a/crates/tests/src/vm_host_env/ibc.rs b/crates/tests/src/vm_host_env/ibc.rs index 2b00bc774f..16d5cfac14 100644 --- a/crates/tests/src/vm_host_env/ibc.rs +++ b/crates/tests/src/vm_host_env/ibc.rs @@ -1,4 +1,5 @@ use core::time::Duration; +use std::cell::RefCell; use std::collections::HashMap; use ibc_testkit::testapp::ibc::clients::mock::client_state::{ @@ -6,6 +7,12 @@ use ibc_testkit::testapp::ibc::clients::mock::client_state::{ }; use ibc_testkit::testapp::ibc::clients::mock::consensus_state::MockConsensusState; use ibc_testkit::testapp::ibc::clients::mock::header::MockHeader; +use namada::core::address::{self, Address, InternalAddress}; +use namada::core::hash::Hash; +use namada::core::storage::{ + self, BlockHash, BlockHeight, Epoch, Key, TxIndex, +}; +use namada::core::time::DurationSecs; use namada::gas::TxGasMeter; use namada::governance::parameters::GovernanceParameters; use namada::ibc::apps::transfer::types::error::TokenTransferError; @@ -67,21 +74,16 @@ use namada::ledger::parameters::storage::{ get_epoch_duration_storage_key, get_max_expected_time_per_block_key, }; use namada::ledger::parameters::EpochDuration; -use namada::ledger::storage::mockdb::MockDB; use namada::ledger::tx_env::TxEnv; use namada::ledger::{ibc, pos}; use namada::proof_of_stake::OwnedPosParams; -use namada::state::Sha256Hasher; +use namada::state::testing::TestState; use namada::tendermint::time::Time as TmTime; use namada::token::{self, Amount, DenominatedAmount}; use namada::tx::Tx; -use namada::types::address::{self, Address, 
InternalAddress}; -use namada::types::hash::Hash; -use namada::types::storage::{ - self, BlockHash, BlockHeight, Epoch, Key, TxIndex, -}; -use namada::types::time::DurationSecs; use namada::vm::{wasm, WasmCacheRwAccess}; +use namada_core::validity_predicate::VpSentinel; +use namada_sdk::state::StateRead; use namada_test_utils::TestWasms; use namada_tx_prelude::BorshSerializeExt; @@ -92,7 +94,7 @@ pub const ANY_DENOMINATION: u8 = 4; const COMMITMENT_PREFIX: &[u8] = b"ibc"; pub struct TestIbcVp<'a> { - pub ibc: Ibc<'a, MockDB, Sha256Hasher, WasmCacheRwAccess>, + pub ibc: Ibc<'a, TestState, WasmCacheRwAccess>, } impl<'a> TestIbcVp<'a> { @@ -109,8 +111,7 @@ impl<'a> TestIbcVp<'a> { } pub struct TestMultitokenVp<'a> { - pub multitoken_vp: - MultitokenVp<'a, MockDB, Sha256Hasher, WasmCacheRwAccess>, + pub multitoken_vp: MultitokenVp<'a, TestState, WasmCacheRwAccess>, } impl<'a> TestMultitokenVp<'a> { @@ -132,8 +133,8 @@ pub fn validate_ibc_vp_from_tx<'a>( tx: &'a Tx, ) -> std::result::Result { let (verifiers, keys_changed) = tx_env - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&tx_env.verifiers); let addr = Address::Internal(InternalAddress::Ibc); if !verifiers.contains(&addr) { @@ -145,15 +146,17 @@ pub fn validate_ibc_vp_from_tx<'a>( let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(1_000_000.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &tx_env.wl_storage.storage, - &tx_env.wl_storage.write_log, + &tx_env.state, tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - 1_000_000.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -170,8 +173,8 @@ pub fn validate_multitoken_vp_from_tx<'a>( target: &Key, ) -> std::result::Result { let (verifiers, keys_changed) = tx_env - .wl_storage - 
.write_log + .state + .write_log() .verifiers_and_changed_keys(&tx_env.verifiers); if !keys_changed.contains(target) { panic!( @@ -183,15 +186,17 @@ pub fn validate_multitoken_vp_from_tx<'a>( let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(1_000_000.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &tx_env.wl_storage.storage, - &tx_env.wl_storage.write_log, + &tx_env.state, tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - 1_000_000.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -208,11 +213,11 @@ pub fn init_storage() -> (Address, Address) { let code_hash = Hash::sha256(&code); tx_host_env::with(|env| { - ibc::init_genesis_storage(&mut env.wl_storage); + ibc::init_genesis_storage(&mut env.state); let gov_params = GovernanceParameters::default(); - gov_params.init_storage(&mut env.wl_storage).unwrap(); + gov_params.init_storage(&mut env.state).unwrap(); pos::test_utils::test_init_genesis( - &mut env.wl_storage, + &mut env.state, OwnedPosParams::default(), vec![get_dummy_genesis_validator()].into_iter(), Epoch(1), @@ -222,20 +227,17 @@ pub fn init_storage() -> (Address, Address) { let key = Key::wasm_code(&code_hash); let hash_key = Key::wasm_hash("vp_user.wasm"); let code_name_key = Key::wasm_code_name("vp_user.wasm".to_owned()); - env.wl_storage.storage.write(&key, code.clone()).unwrap(); - env.wl_storage.storage.write(&hash_key, code_hash).unwrap(); - env.wl_storage - .storage - .write(&code_name_key, code_hash) - .unwrap(); + env.state.db_write(&key, code.clone()).unwrap(); + env.state.db_write(&hash_key, code_hash).unwrap(); + env.state.db_write(&code_name_key, code_hash).unwrap(); // block header to check timeout timestamp - env.wl_storage - .storage + env.state + .in_mem_mut() 
.set_header(tm_dummy_header()) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(1)) .unwrap(); }); @@ -249,13 +251,11 @@ pub fn init_storage() -> (Address, Address) { let key = token::storage_key::balance_key(&token, &account); let init_bal = Amount::from_uint(100, token_denom).unwrap(); tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&denom_key, &token_denom.serialize_to_vec()) + env.state + .db_write(&denom_key, &token_denom.serialize_to_vec()) .unwrap(); - env.wl_storage - .storage - .write(&key, &init_bal.serialize_to_vec()) + env.state + .db_write(&key, &init_bal.serialize_to_vec()) .unwrap(); }); @@ -267,29 +267,29 @@ pub fn init_storage() -> (Address, Address) { }; let bytes = epoch_duration.serialize_to_vec(); tx_host_env::with(|env| { - env.wl_storage.storage.write(&key, &bytes).unwrap(); + env.state.db_write(&key, &bytes).unwrap(); }); // max_expected_time_per_block let time = DurationSecs::from(Duration::new(60, 0)); let key = get_max_expected_time_per_block_key(); - let bytes = namada::types::encode(&time); + let bytes = namada::core::encode(&time); tx_host_env::with(|env| { - env.wl_storage.storage.write(&key, &bytes).unwrap(); + env.state.db_write(&key, &bytes).unwrap(); }); // commit the initialized token and account tx_host_env::with(|env| { - env.wl_storage.commit_tx(); - env.wl_storage.commit_block().unwrap(); + env.state.commit_tx(); + env.state.commit_block().unwrap(); // block header to check timeout timestamp - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); }); @@ -318,10 +318,7 @@ pub fn prepare_client() -> (ClientId, Any, HashMap>) { // client update time let key = client_update_timestamp_key(&client_id); let time = tx_host_env::with(|env| { - let header = env - .wl_storage - .storage - 
.get_block_header(None) + let header = StateRead::get_block_header(&env.state, None) .unwrap() .0 .unwrap(); @@ -332,7 +329,7 @@ pub fn prepare_client() -> (ClientId, Any, HashMap>) { // client update height let key = client_update_height_key(&client_id); let height = tx_host_env::with(|env| { - let height = env.wl_storage.storage.get_block_height().0; + let height = env.state.in_mem().get_block_height().0; Height::new(0, height.0).expect("invalid height") }); let bytes = height.encode_vec(); diff --git a/crates/tests/src/vm_host_env/mod.rs b/crates/tests/src/vm_host_env/mod.rs index 29206185b1..a237c5e298 100644 --- a/crates/tests/src/vm_host_env/mod.rs +++ b/crates/tests/src/vm_host_env/mod.rs @@ -24,6 +24,11 @@ mod tests { use borsh_ext::BorshSerializeExt; use itertools::Itertools; use namada::account::pks_handle; + use namada::core::hash::Hash; + use namada::core::key::*; + use namada::core::storage::{self, BlockHash, BlockHeight, Key, KeySeg}; + use namada::core::time::DateTimeUtc; + use namada::core::{address, key}; use namada::ibc::context::transfer_mod::testing::DummyTransferModule; use namada::ibc::primitives::Msg; use namada::ibc::Error as IbcActionError; @@ -34,11 +39,6 @@ mod tests { use namada::ledger::tx_env::TxEnv; use namada::token::{self, Amount}; use namada::tx::Tx; - use namada::types::hash::Hash; - use namada::types::key::*; - use namada::types::storage::{self, BlockHash, BlockHeight, Key, KeySeg}; - use namada::types::time::DateTimeUtc; - use namada::types::{address, key}; use namada_test_utils::TestWasms; use namada_tx_prelude::address::InternalAddress; use namada_tx_prelude::chain::ChainId; @@ -168,7 +168,7 @@ mod tests { tx_host_env::with(|env| { for i in sub_keys.iter() { let key = prefix.push(i).unwrap(); - env.wl_storage.write(&key, i).unwrap(); + env.state.write(&key, i).unwrap(); } }); @@ -246,35 +246,23 @@ mod tests { assert_eq!( tx::ctx().get_chain_id().unwrap(), - tx_host_env::with(|env| env.wl_storage.storage.get_chain_id().0) + 
tx_host_env::with(|env| env.state.in_mem().get_chain_id().0) ); assert_eq!( tx::ctx().get_block_height().unwrap(), - tx_host_env::with(|env| env - .wl_storage - .storage - .get_block_height() - .0) + tx_host_env::with(|env| env.state.in_mem().get_block_height().0) ); assert_eq!( tx::ctx().get_block_hash().unwrap(), - tx_host_env::with(|env| env.wl_storage.storage.get_block_hash().0) + tx_host_env::with(|env| env.state.in_mem().get_block_hash().0) ); assert_eq!( tx::ctx().get_block_epoch().unwrap(), - tx_host_env::with(|env| env - .wl_storage - .storage - .get_current_epoch() - .0) + tx_host_env::with(|env| env.state.in_mem().get_current_epoch().0) ); assert_eq!( tx::ctx().get_native_token().unwrap(), - tx_host_env::with(|env| env - .wl_storage - .storage - .native_token - .clone()) + tx_host_env::with(|env| env.state.in_mem().native_token.clone()) ); } @@ -284,7 +272,8 @@ mod tests { tx_host_env::init(); let pred_epochs = tx::ctx().get_pred_epochs().unwrap(); - let expected = tx_host_env::take().wl_storage.storage.block.pred_epochs; + let expected = + tx_host_env::take().state.in_mem().block.pred_epochs.clone(); assert_eq!(expected, pred_epochs); } @@ -316,12 +305,9 @@ mod tests { let existing_key = addr_key.join(&Key::parse("existing_key_raw").unwrap()); let existing_value = vec![2_u8; 1000]; - tx_env - .wl_storage - .write(&existing_key, &existing_value) - .unwrap(); + tx_env.state.write(&existing_key, &existing_value).unwrap(); // ... and commit it - tx_env.wl_storage.commit_tx(); + tx_env.state.commit_tx(); // In a transaction, write override the existing key's value and add // another key-value @@ -404,10 +390,10 @@ mod tests { // Write some values to storage ... for i in sub_keys.iter() { let key = prefix.push(i).unwrap(); - tx_env.wl_storage.write(&key, i).unwrap(); + tx_env.state.write(&key, i).unwrap(); } // ... 
and commit them - tx_env.wl_storage.commit_tx(); + tx_env.state.commit_tx(); // In a transaction, write override the existing key's value and add // another key-value @@ -461,7 +447,7 @@ mod tests { let keypair = key::testing::keypair_1(); let pk = keypair.ref_to(); - let _ = pks_handle(&addr).insert(&mut env.wl_storage, 0_u8, pk.clone()); + let _ = pks_handle(&addr).insert(&mut env.state, 0_u8, pk.clone()); // Initialize the environment vp_host_env::set(env); @@ -471,14 +457,14 @@ mod tests { let expiration = Some(DateTimeUtc::now()); for data in &[ // Tx with some arbitrary data - vec![1, 2, 3, 4].repeat(10), + [1, 2, 3, 4].repeat(10), // Tx without any data vec![], ] { let keypairs = vec![keypair.clone()]; let pks_map = AccountPublicKeysMap::from_iter(vec![pk.clone()]); let signed_tx_data = vp_host_env::with(|env| { - let chain_id = env.wl_storage.storage.chain_id.clone(); + let chain_id = env.state.in_mem().chain_id.clone(); let mut tx = Tx::new(chain_id, expiration); tx.add_code(code.clone(), None) .add_serialized_data(data.to_vec()) @@ -526,35 +512,23 @@ mod tests { assert_eq!( vp::CTX.get_chain_id().unwrap(), - vp_host_env::with(|env| env.wl_storage.storage.get_chain_id().0) + vp_host_env::with(|env| env.state.in_mem().get_chain_id().0) ); assert_eq!( vp::CTX.get_block_height().unwrap(), - vp_host_env::with(|env| env - .wl_storage - .storage - .get_block_height() - .0) + vp_host_env::with(|env| env.state.in_mem().get_block_height().0) ); assert_eq!( vp::CTX.get_block_hash().unwrap(), - vp_host_env::with(|env| env.wl_storage.storage.get_block_hash().0) + vp_host_env::with(|env| env.state.in_mem().get_block_hash().0) ); assert_eq!( vp::CTX.get_block_epoch().unwrap(), - vp_host_env::with(|env| env - .wl_storage - .storage - .get_current_epoch() - .0) + vp_host_env::with(|env| env.state.in_mem().get_current_epoch().0) ); assert_eq!( vp::CTX.get_native_token().unwrap(), - vp_host_env::with(|env| env - .wl_storage - .storage - .native_token - .clone()) + 
vp_host_env::with(|env| env.state.in_mem().native_token.clone()) ); } @@ -588,11 +562,8 @@ mod tests { // store wasm codes let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - env.wl_storage.storage.write(&key, code.clone()).unwrap(); - env.wl_storage - .storage - .write(&len_key, code_len.clone()) - .unwrap(); + env.state.write_bytes(&key, &code).unwrap(); + env.state.write_bytes(&len_key, &code_len).unwrap(); }); let mut tx = Tx::new(ChainId::default(), None); tx.add_code_from_hash(code_hash, None) @@ -611,11 +582,8 @@ mod tests { // store wasm codes let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - env.wl_storage.storage.write(&key, code.clone()).unwrap(); - env.wl_storage - .storage - .write(&len_key, code_len.clone()) - .unwrap(); + env.state.write(&key, &code).unwrap(); + env.state.write(&len_key, &code_len).unwrap(); }); let mut tx = Tx::new(ChainId::default(), None); tx.add_code_from_hash(code_hash, None) @@ -661,12 +629,12 @@ mod tests { // Commit env.commit_tx_and_block(); // update the block height for the following client update - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); @@ -708,10 +676,7 @@ mod tests { let (client_id, client_state, writes) = ibc::prepare_client(); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -737,12 +702,12 @@ mod tests { // Commit env.commit_tx_and_block(); // for the next block - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); tx_host_env::set(env); @@ -785,10 +750,7 
@@ mod tests { let (client_id, client_state, writes) = ibc::prepare_client(); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }) }); @@ -814,12 +776,12 @@ mod tests { // Commit env.commit_tx_and_block(); // for the next block - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); tx_host_env::set(env); @@ -863,10 +825,7 @@ mod tests { writes.extend(conn_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -893,12 +852,12 @@ mod tests { // Commit env.commit_tx_and_block(); // for the next block - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); tx_host_env::set(env); @@ -936,10 +895,7 @@ mod tests { writes.extend(conn_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -972,12 +928,12 @@ mod tests { // Commit env.commit_tx_and_block(); // for the next block - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); tx_host_env::set(env); @@ -1019,10 +975,7 @@ mod tests { writes.extend(channel_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - 
.write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -1076,10 +1029,7 @@ mod tests { writes.extend(channel_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -1126,10 +1076,7 @@ mod tests { writes.extend(channel_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -1174,12 +1121,12 @@ mod tests { // Commit env.commit_tx_and_block(); // for the next block - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); tx_host_env::set(env); @@ -1213,7 +1160,7 @@ mod tests { tx_host_env::set(env); let balance_key = token::storage_key::balance_key(&token, &sender); let balance: Option = tx_host_env::with(|env| { - env.wl_storage.read(&balance_key).expect("read error") + env.state.read(&balance_key).expect("read error") }); assert_eq!( balance, @@ -1224,7 +1171,7 @@ mod tests { &address::Address::Internal(address::InternalAddress::Ibc), ); let escrow: Option = tx_host_env::with(|env| { - env.wl_storage.read(&escrow_key).expect("read error") + env.state.read(&escrow_key).expect("read error") }); assert_eq!( escrow, @@ -1266,10 +1213,7 @@ mod tests { ); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -1300,11 +1244,11 @@ mod tests { // Check the balance tx_host_env::set(env); let balance: Option = tx_host_env::with(|env| { - 
env.wl_storage.read(&balance_key).expect("read error") + env.state.read(&balance_key).expect("read error") }); assert_eq!(balance, Some(Amount::from_u64(0))); let minted: Option = tx_host_env::with(|env| { - env.wl_storage.read(&minted_key).expect("read error") + env.state.read(&minted_key).expect("read error") }); assert_eq!(minted, Some(Amount::from_u64(0))); } @@ -1331,10 +1275,7 @@ mod tests { writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -1376,12 +1317,11 @@ mod tests { // Check the balance tx_host_env::set(env); let key = ibc::balance_key_with_ibc_prefix(denom, &receiver); - let balance: Option = tx_host_env::with(|env| { - env.wl_storage.read(&key).expect("read error") - }); + let balance: Option = + tx_host_env::with(|env| env.state.read(&key).expect("read error")); assert_eq!(balance, Some(Amount::from_u64(100))); let minted: Option = tx_host_env::with(|env| { - env.wl_storage.read(&minted_key).expect("read error") + env.state.read(&minted_key).expect("read error") }); assert_eq!(minted, Some(Amount::from_u64(100))); } @@ -1408,10 +1348,7 @@ mod tests { writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -1449,10 +1386,7 @@ mod tests { tx_host_env::set(env); let ack_key = ibc_storage::ack_key(&port_id, &channel_id, sequence); let ack = tx_host_env::with(|env| { - env.wl_storage - .read_bytes(&ack_key) - .expect("read error") - .unwrap() + env.state.read_bytes(&ack_key).expect("read error").unwrap() }); let expected_ack = Hash::sha256(Vec::::from(ibc::transfer_ack_with_error())) @@ -1462,8 +1396,8 @@ mod tests { let receipt_key = ibc_storage::receipt_key(&port_id, &channel_id, sequence); let changed_keys = tx_host_env::with(|env| { - 
env.wl_storage - .write_log + env.state + .write_log() .verifiers_and_changed_keys(&BTreeSet::new()) .1 }); @@ -1492,10 +1426,7 @@ mod tests { writes.extend(channel_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); // escrow in advance @@ -1503,14 +1434,9 @@ mod tests { &token, &address::Address::Internal(address::InternalAddress::Ibc), ); - let val = Amount::from_uint(100, ibc::ANY_DENOMINATION) - .unwrap() - .serialize_to_vec(); + let val = Amount::from_uint(100, ibc::ANY_DENOMINATION).unwrap(); tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&escrow_key, &val) - .expect("write error"); + env.state.write(&escrow_key, val).expect("write error"); }); // Set this chain as the source zone @@ -1555,15 +1481,14 @@ mod tests { // Check the balance tx_host_env::set(env); let key = token::storage_key::balance_key(&token, &receiver); - let balance: Option = tx_host_env::with(|env| { - env.wl_storage.read(&key).expect("read error") - }); + let balance: Option = + tx_host_env::with(|env| env.state.read(&key).expect("read error")); assert_eq!( balance, Some(Amount::from_uint(200, ibc::ANY_DENOMINATION).unwrap()) ); let escrow: Option = tx_host_env::with(|env| { - env.wl_storage.read(&escrow_key).expect("read error") + env.state.read(&escrow_key).expect("read error") }); assert_eq!( escrow, @@ -1592,10 +1517,7 @@ mod tests { writes.extend(channel_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); // escrow in advance @@ -1607,12 +1529,9 @@ mod tests { denom, &address::Address::Internal(address::InternalAddress::Ibc), ); - let val = Amount::from_u64(100).serialize_to_vec(); + let val = Amount::from_u64(100); 
tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&escrow_key, &val) - .expect("write error"); + env.state.write(&escrow_key, val).expect("write error"); }); // Set this chain as the source zone @@ -1663,12 +1582,11 @@ mod tests { let denom = format!("{}/{}/{}", dummy_src_port, dummy_src_channel, token); let key = ibc::balance_key_with_ibc_prefix(denom, &receiver); - let balance: Option = tx_host_env::with(|env| { - env.wl_storage.read(&key).expect("read error") - }); + let balance: Option = + tx_host_env::with(|env| env.state.read(&key).expect("read error")); assert_eq!(balance, Some(Amount::from_u64(100))); let escrow: Option = tx_host_env::with(|env| { - env.wl_storage.read(&escrow_key).expect("read error") + env.state.read(&escrow_key).expect("read error") }); assert_eq!(escrow, Some(Amount::from_u64(0))); } @@ -1694,10 +1612,7 @@ mod tests { writes.extend(channel_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }) }); @@ -1719,12 +1634,12 @@ mod tests { let mut env = tx_host_env::take(); env.commit_tx_and_block(); // for the next block - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); tx_host_env::set(env); @@ -1784,10 +1699,7 @@ mod tests { writes.extend(channel_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }) }); @@ -1808,12 +1720,12 @@ mod tests { let mut env = tx_host_env::take(); env.commit_tx_and_block(); // for the next block - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - 
env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); tx_host_env::set(env); diff --git a/crates/tests/src/vm_host_env/tx.rs b/crates/tests/src/vm_host_env/tx.rs index 7b23a1553d..6e8c357feb 100644 --- a/crates/tests/src/vm_host_env/tx.rs +++ b/crates/tests/src/vm_host_env/tx.rs @@ -1,18 +1,17 @@ use std::borrow::Borrow; +use std::cell::RefCell; use std::collections::BTreeSet; +use namada::core::address::Address; +use namada::core::hash::Hash; +use namada::core::storage::{Key, TxIndex}; +use namada::core::time::DurationSecs; use namada::ledger::gas::TxGasMeter; use namada::ledger::parameters::{self, EpochDuration}; use namada::ledger::storage::mockdb::MockDB; -use namada::ledger::storage::testing::TestStorage; -use namada::ledger::storage::write_log::WriteLog; -use namada::ledger::storage::{Sha256Hasher, WlStorage}; +use namada::ledger::storage::testing::TestState; pub use namada::tx::data::TxType; use namada::tx::Tx; -use namada::types::address::Address; -use namada::types::hash::Hash; -use namada::types::storage::{Key, TxIndex}; -use namada::types::time::DurationSecs; use namada::vm::prefix_iter::PrefixIterators; use namada::vm::wasm::run::Error; use namada::vm::wasm::{self, TxCache, VpCache}; @@ -29,6 +28,7 @@ use crate::vp::TestVpEnv; static mut CTX: Ctx = unsafe { Ctx::new() }; /// Tx execution context provides access to host env functions +#[allow(static_mut_refs)] pub fn ctx() -> &'static mut Ctx { unsafe { &mut CTX } } @@ -47,11 +47,11 @@ pub mod tx_host_env { /// Host environment structures required for transactions. #[derive(Debug)] pub struct TestTxEnv { - pub wl_storage: WlStorage, + pub state: TestState, pub iterators: PrefixIterators<'static, MockDB>, pub verifiers: BTreeSet
, - pub gas_meter: TxGasMeter, - pub sentinel: TxSentinel, + pub gas_meter: RefCell, + pub sentinel: RefCell, pub tx_index: TxIndex, pub result_buffer: Option>, pub vp_wasm_cache: VpCache, @@ -66,18 +66,16 @@ impl Default for TestTxEnv { wasm::compilation_cache::common::testing::cache(); let (tx_wasm_cache, tx_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let wl_storage = WlStorage { - storage: TestStorage::default(), - write_log: WriteLog::default(), - }; - + let state = TestState::default(); let mut tx = Tx::from_type(TxType::Raw); - tx.header.chain_id = wl_storage.storage.chain_id.clone(); + tx.header.chain_id = state.in_mem().chain_id.clone(); Self { - wl_storage, + state, iterators: PrefixIterators::default(), - gas_meter: TxGasMeter::new_from_sub_limit(100_000_000.into()), - sentinel: TxSentinel::default(), + gas_meter: RefCell::new(TxGasMeter::new_from_sub_limit( + 100_000_000.into(), + )), + sentinel: RefCell::new(TxSentinel::default()), tx_index: TxIndex::default(), verifiers: BTreeSet::default(), result_buffer: None, @@ -92,12 +90,12 @@ impl Default for TestTxEnv { impl TestTxEnv { pub fn all_touched_storage_keys(&self) -> BTreeSet { - self.wl_storage.write_log.get_keys() + self.state.write_log().get_keys() } pub fn get_verifiers(&self) -> BTreeSet
{ - self.wl_storage - .write_log + self.state + .write_log() .verifiers_and_changed_keys(&self.verifiers) .0 } @@ -110,7 +108,7 @@ impl TestTxEnv { max_signatures_per_transaction: Option, ) { parameters::update_epoch_parameter( - &mut self.wl_storage, + &mut self.state, &epoch_duration.unwrap_or(EpochDuration { min_num_of_blocks: 1, min_duration: DurationSecs(5), @@ -118,17 +116,17 @@ impl TestTxEnv { ) .unwrap(); parameters::update_tx_allowlist_parameter( - &mut self.wl_storage, + &mut self.state, tx_allowlist.unwrap_or_default(), ) .unwrap(); parameters::update_vp_allowlist_parameter( - &mut self.wl_storage, + &mut self.state, vp_allowlist.unwrap_or_default(), ) .unwrap(); parameters::update_max_signature_per_tx( - &mut self.wl_storage, + &mut self.state, max_signatures_per_transaction.unwrap_or(15), ) .unwrap(); @@ -137,7 +135,7 @@ impl TestTxEnv { pub fn store_wasm_code(&mut self, code: Vec) { let hash = Hash::sha256(&code); let key = Key::wasm_code(&hash); - self.wl_storage.storage.write(&key, code).unwrap(); + self.state.db_write(&key, code).unwrap(); } /// Fake accounts' existence by initializing their VP storage. @@ -159,9 +157,8 @@ impl TestTxEnv { } let key = Key::validity_predicate(address.borrow()); let vp_code = vec![]; - self.wl_storage - .storage - .write(&key, vp_code) + self.state + .db_write(&key, vp_code) .expect("Unable to write VP"); } } @@ -173,7 +170,7 @@ impl TestTxEnv { threshold: u8, ) { account::init_account_storage( - &mut self.wl_storage, + &mut self.state, owner, &public_keys, threshold, @@ -188,21 +185,20 @@ impl TestTxEnv { threshold: u8, ) { let storage_key = account::threshold_key(address); - self.wl_storage - .storage - .write(&storage_key, threshold.serialize_to_vec()) + self.state + .db_write(&storage_key, threshold.serialize_to_vec()) .unwrap(); } /// Commit the genesis state. Typically, you'll want to call this after /// setting up the initial state, before running a transaction. 
pub fn commit_genesis(&mut self) { - self.wl_storage.commit_block().unwrap(); + self.state.commit_block().unwrap(); } pub fn commit_tx_and_block(&mut self) { - self.wl_storage.commit_tx(); - self.wl_storage + self.state.commit_tx(); + self.state .commit_block() .map_err(|err| println!("{:?}", err)) .ok(); @@ -218,18 +214,16 @@ impl TestTxEnv { amount: token::Amount, ) { let storage_key = token::storage_key::balance_key(token, target); - self.wl_storage - .storage - .write(&storage_key, amount.serialize_to_vec()) + self.state + .db_write(&storage_key, amount.serialize_to_vec()) .unwrap(); } /// Apply the tx changes to the write log. pub fn execute_tx(&mut self) -> Result<(), Error> { wasm::run::tx( - &self.wl_storage.storage, - &mut self.wl_storage.write_log, - &mut self.gas_meter, + &mut self.state, + &self.gas_meter, &self.tx_index, &self.tx, &mut self.vp_wasm_cache, @@ -244,8 +238,6 @@ impl TestTxEnv { /// invoked host environment functions and so it must be initialized /// before the test. mod native_tx_host_env { - - use std::cell::RefCell; use std::pin::Pin; // TODO replace with `std::concat_idents` once stabilized (https://github.com/rust-lang/rust/issues/29599) @@ -258,7 +250,7 @@ mod native_tx_host_env { /// A [`TestTxEnv`] that can be used for tx host env functions calls /// that implements the WASM host environment in native environment. pub static ENV: RefCell>>> = - RefCell::new(None); + const { RefCell::new(None) }; } /// Initialize the tx host environment in [`ENV`]. This will be used in the @@ -317,14 +309,14 @@ mod native_tx_host_env { /// changes. pub fn set_from_vp_env(vp_env: TestVpEnv) { let TestVpEnv { - wl_storage, + state, tx, vp_wasm_cache, vp_cache_dir, .. 
} = vp_env; let tx_env = TestTxEnv { - wl_storage, + state, vp_wasm_cache, vp_cache_dir, tx, @@ -343,7 +335,7 @@ mod native_tx_host_env { #[no_mangle] extern "C" fn extern_fn_name( $($arg: $type),* ) { with(|TestTxEnv { - wl_storage, + state, iterators, verifiers, gas_meter, @@ -358,8 +350,7 @@ mod native_tx_host_env { }: &mut TestTxEnv| { let tx_env = vm::host_env::testing::tx_env( - &wl_storage.storage, - &mut wl_storage.write_log, + state, iterators, verifiers, gas_meter, @@ -386,7 +377,7 @@ mod native_tx_host_env { extern "C" fn extern_fn_name( $($arg: $type),* ) -> $ret { with(|TestTxEnv { tx_index, - wl_storage, + state, iterators, verifiers, gas_meter, @@ -400,8 +391,7 @@ mod native_tx_host_env { }: &mut TestTxEnv| { let tx_env = vm::host_env::testing::tx_env( - &wl_storage.storage, - &mut wl_storage.write_log, + state, iterators, verifiers, gas_meter, @@ -427,7 +417,7 @@ mod native_tx_host_env { #[no_mangle] extern "C" fn extern_fn_name( $($arg: $type),* ) { with(|TestTxEnv { - wl_storage, + state, iterators, verifiers, gas_meter, @@ -442,8 +432,7 @@ mod native_tx_host_env { }: &mut TestTxEnv| { let tx_env = vm::host_env::testing::tx_env( - &wl_storage.storage, - &mut wl_storage.write_log, + state, iterators, verifiers, gas_meter, @@ -511,10 +500,11 @@ mod native_tx_host_env { #[cfg(test)] mod tests { - use namada::ledger::storage::mockdb::MockDB; - use namada::types::storage; + use namada::core::storage; use namada::vm::host_env::{self, TxVmEnv}; use namada::vm::memory::VmMemory; + use namada_core::hash::Sha256Hasher; + use namada_tx_prelude::StorageWrite; use proptest::prelude::*; use test_log::test; @@ -702,20 +692,19 @@ mod tests { if setup.write_to_storage { // Write the key-val to storage which may affect `tx_read` execution // path - let _res = - test_env.wl_storage.storage.write(&setup.key, &setup.val); + let _res = test_env.state.write_bytes(&setup.key, &setup.val); } if setup.write_to_wl { // Write the key-val to write log which may affect 
`tx_read` // execution path let _res = test_env - .wl_storage - .write_log + .state + .write_log_mut() .write(&setup.key, setup.val.clone()); } let TestTxEnv { - wl_storage, + state, iterators, verifiers, gas_meter, @@ -730,8 +719,7 @@ mod tests { } = test_env; let tx_env = vm::host_env::testing::tx_env_with_wasm_memory( - &wl_storage.storage, - &mut wl_storage.write_log, + state, iterators, verifiers, gas_meter, @@ -759,7 +747,7 @@ mod tests { any::(), any::(), any::(), - namada::types::storage::testing::arb_key(), + namada::core::storage::testing::arb_key(), arb_u64(), arb_u64(), any::>(), diff --git a/crates/tests/src/vm_host_env/vp.rs b/crates/tests/src/vm_host_env/vp.rs index a1963fe4ce..d36e73dce9 100644 --- a/crates/tests/src/vm_host_env/vp.rs +++ b/crates/tests/src/vm_host_env/vp.rs @@ -1,15 +1,14 @@ +use std::cell::RefCell; use std::collections::BTreeSet; +use namada::core::address::{self, Address}; +use namada::core::storage::{self, Key, TxIndex}; use namada::gas::TxGasMeter; use namada::ledger::gas::VpGasMeter; use namada::ledger::storage::mockdb::MockDB; -use namada::ledger::storage::testing::TestStorage; -use namada::ledger::storage::write_log::WriteLog; -use namada::ledger::storage::{Sha256Hasher, WlStorage}; +use namada::ledger::storage::testing::TestState; use namada::tx::data::TxType; use namada::tx::Tx; -use namada::types::address::{self, Address}; -use namada::types::storage::{self, Key, TxIndex}; use namada::vm::prefix_iter::PrefixIterators; use namada::vm::wasm::{self, VpCache}; use namada::vm::{self, WasmCacheRwAccess}; @@ -42,10 +41,10 @@ pub mod vp_host_env { #[derive(Debug)] pub struct TestVpEnv { pub addr: Address, - pub wl_storage: WlStorage, + pub state: TestState, pub iterators: PrefixIterators<'static, MockDB>, - pub gas_meter: VpGasMeter, - pub sentinel: VpSentinel, + pub gas_meter: RefCell, + pub sentinel: RefCell, pub tx: Tx, pub tx_index: TxIndex, pub keys_changed: BTreeSet, @@ -66,20 +65,17 @@ impl Default for TestVpEnv { let 
(vp_wasm_cache, vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let wl_storage = WlStorage { - storage: TestStorage::default(), - write_log: WriteLog::default(), - }; + let state = TestState::default(); let mut tx = Tx::from_type(TxType::Raw); - tx.header.chain_id = wl_storage.storage.chain_id.clone(); + tx.header.chain_id = state.in_mem().chain_id.clone(); Self { addr: address::testing::established_address_1(), - wl_storage, + state, iterators: PrefixIterators::default(), - gas_meter: VpGasMeter::new_from_tx_meter( + gas_meter: RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(10_000_000_000.into()), - ), - sentinel: VpSentinel::default(), + )), + sentinel: RefCell::new(VpSentinel::default()), tx, tx_index: TxIndex::default(), keys_changed: BTreeSet::default(), @@ -94,12 +90,12 @@ impl Default for TestVpEnv { impl TestVpEnv { pub fn all_touched_storage_keys(&self) -> BTreeSet { - self.wl_storage.write_log.get_keys() + self.state.write_log().get_keys() } pub fn get_verifiers(&self) -> BTreeSet
{ - self.wl_storage - .write_log + self.state + .write_log() .verifiers_and_changed_keys(&self.verifiers) .0 } @@ -110,22 +106,19 @@ impl TestVpEnv { /// invoked host environment functions and so it must be initialized /// before the test. mod native_vp_host_env { - - use std::cell::RefCell; use std::pin::Pin; // TODO replace with `std::concat_idents` once stabilized (https://github.com/rust-lang/rust/issues/29599) use concat_idents::concat_idents; - use namada::state::Sha256Hasher; + use namada::state::StateRead; use namada::vm::host_env::*; - use namada::vm::WasmCacheRwAccess; use super::*; #[cfg(feature = "wasm-runtime")] pub type VpEval = namada::vm::wasm::run::VpEvalWasm< - MockDB, - Sha256Hasher, + ::D, + ::H, WasmCacheRwAccess, >; #[cfg(not(feature = "wasm-runtime"))] @@ -135,7 +128,7 @@ mod native_vp_host_env { /// A [`TestVpEnv`] that can be used for VP host env functions calls /// that implements the WASM host environment in native environment. pub static ENV: RefCell>>> = - RefCell::new(None); + const {RefCell::new(None) }; } /// Initialize the VP environment in [`ENV`]. 
This will be used in the @@ -197,7 +190,7 @@ mod native_vp_host_env { // Write an empty validity predicate for the address, because it's used // to check if the address exists when we write into its storage let vp_key = Key::validity_predicate(&addr); - tx_env.wl_storage.storage.write(&vp_key, vec![]).unwrap(); + tx_env.state.db_write(&vp_key, vec![]).unwrap(); tx_host_env::set(tx_env); apply_tx(&addr); @@ -205,8 +198,8 @@ mod native_vp_host_env { let tx_env = tx_host_env::take(); let verifiers_from_tx = &tx_env.verifiers; let (verifiers, keys_changed) = tx_env - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(verifiers_from_tx); if !verifiers.contains(&addr) { panic!( @@ -218,7 +211,7 @@ mod native_vp_host_env { let vp_env = TestVpEnv { addr, - wl_storage: tx_env.wl_storage, + state: tx_env.state, keys_changed, verifiers, ..Default::default() @@ -239,7 +232,7 @@ mod native_vp_host_env { _ctx: VpCtx<'static, Self::Db, Self::H, Self::Eval, Self::CA>, _vp_code_hash: Vec, _input_data: Vec, - ) -> namada::types::internal::HostEnvResult { + ) -> namada::core::internal::HostEnvResult { unimplemented!( "The \"wasm-runtime\" feature must be enabled to test with \ the `eval` function." 
@@ -258,7 +251,7 @@ mod native_vp_host_env { extern "C" fn extern_fn_name( $($arg: $type),* ) { with(|TestVpEnv { addr, - wl_storage, + state, iterators, gas_meter, sentinel, @@ -274,8 +267,7 @@ mod native_vp_host_env { let env = vm::host_env::testing::vp_env( addr, - &wl_storage.storage, - &wl_storage.write_log, + state, iterators, gas_meter, sentinel, @@ -303,7 +295,7 @@ mod native_vp_host_env { extern "C" fn extern_fn_name( $($arg: $type),* ) -> $ret { with(|TestVpEnv { addr, - wl_storage, + state, iterators, gas_meter, sentinel, @@ -319,8 +311,7 @@ mod native_vp_host_env { let env = vm::host_env::testing::vp_env( addr, - &wl_storage.storage, - &wl_storage.write_log, + state, iterators, gas_meter, sentinel, diff --git a/crates/token/src/lib.rs b/crates/token/src/lib.rs index 9a97c4f89f..136b324001 100644 --- a/crates/token/src/lib.rs +++ b/crates/token/src/lib.rs @@ -9,12 +9,13 @@ pub mod storage_key { pub use namada_trans_token::storage_key::*; } -use namada_core::types::address::Address; +use namada_core::address::Address; +use namada_core::event::EmitEvents; use namada_storage::{Result, StorageRead, StorageWrite}; /// Initialize parameters for the token in storage during the genesis block. pub fn write_params( - params: &Option, + params: &Option, storage: &mut S, address: &Address, denom: &Denomination, @@ -28,3 +29,17 @@ where } Ok(()) } + +pub fn finalize_block( + storage: &mut S, + _events: &mut impl EmitEvents, + is_new_epoch: bool, +) -> Result<()> +where + S: StorageWrite + StorageRead + WithConversionState, +{ + if is_new_epoch { + conversion::update_allowed_conversions(storage)?; + } + Ok(()) +} diff --git a/crates/core/src/ledger/inflation.rs b/crates/trans_token/src/inflation.rs similarity index 99% rename from crates/core/src/ledger/inflation.rs rename to crates/trans_token/src/inflation.rs index d7600a2d5c..b9be694a20 100644 --- a/crates/core/src/ledger/inflation.rs +++ b/crates/trans_token/src/inflation.rs @@ -2,8 +2,8 @@ //! 
proof-of-stake, providing liquity to shielded asset pools, and public goods //! funding. -use crate::types::dec::Dec; -use crate::types::uint::Uint; +use namada_core::dec::Dec; +use namada_core::uint::Uint; /// Holds the PD controller values that should be updated in storage #[allow(missing_docs)] diff --git a/crates/trans_token/src/lib.rs b/crates/trans_token/src/lib.rs index 2a5af85457..ea8b646005 100644 --- a/crates/trans_token/src/lib.rs +++ b/crates/trans_token/src/lib.rs @@ -1,7 +1,8 @@ //! Transparent token types, storage functions, and validation. +pub mod inflation; mod storage; pub mod storage_key; -pub use namada_core::types::token::*; +pub use namada_core::token::*; pub use storage::*; diff --git a/crates/trans_token/src/storage.rs b/crates/trans_token/src/storage.rs index 4f03ed1509..ca429b7f5b 100644 --- a/crates/trans_token/src/storage.rs +++ b/crates/trans_token/src/storage.rs @@ -1,6 +1,6 @@ +use namada_core::address::{Address, InternalAddress}; use namada_core::hints; -use namada_core::types::address::{Address, InternalAddress}; -use namada_core::types::token::{self, Amount, DenominatedAmount}; +use namada_core::token::{self, Amount, DenominatedAmount}; use namada_storage as storage; use namada_storage::{StorageRead, StorageWrite}; @@ -229,7 +229,7 @@ pub fn denom_to_amount( #[cfg(test)] mod testing { - use namada_core::types::{address, token}; + use namada_core::{address, token}; use namada_storage::testing::TestStorage; use super::{burn_tokens, credit_tokens, read_balance, read_total_supply}; @@ -237,7 +237,7 @@ mod testing { #[test] fn test_burn_native_tokens() { let mut storage = TestStorage::default(); - let native_token = address::nam(); + let native_token = address::testing::nam(); // Get some addresses let addr1 = address::testing::gen_implicit_address(); diff --git a/crates/trans_token/src/storage_key.rs b/crates/trans_token/src/storage_key.rs index 7797d68b51..9f0d2247c3 100644 --- a/crates/trans_token/src/storage_key.rs +++ 
b/crates/trans_token/src/storage_key.rs @@ -1,7 +1,7 @@ //! Transparent token storage keys -use namada_core::types::address::{Address, InternalAddress}; -use namada_core::types::storage::{self, DbKeySeg, KeySeg}; +use namada_core::address::{Address, InternalAddress}; +use namada_core::storage::{self, DbKeySeg, KeySeg}; /// Key segment for a balance key pub const BALANCE_STORAGE_KEY: &str = "balance"; @@ -196,7 +196,7 @@ pub fn is_any_shielded_action_balance_key( [ token, &Address::Internal( - namada_core::types::address::InternalAddress::Ibc, + namada_core::address::InternalAddress::Ibc, ), ] }) diff --git a/crates/tx/src/data/eval_vp.rs b/crates/tx/src/data/eval_vp.rs index e248018ad5..69562734fa 100644 --- a/crates/tx/src/data/eval_vp.rs +++ b/crates/tx/src/data/eval_vp.rs @@ -1,5 +1,5 @@ use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::hash::Hash; +use namada_core::hash::Hash; use serde::{Deserialize, Serialize}; use crate::Tx; diff --git a/crates/tx/src/data/mod.rs b/crates/tx/src/data/mod.rs index f3d93131b7..e187bdd9fb 100644 --- a/crates/tx/src/data/mod.rs +++ b/crates/tx/src/data/mod.rs @@ -18,14 +18,14 @@ use std::fmt::{self, Display}; use std::str::FromStr; pub use decrypted::*; +use namada_core::address::Address; use namada_core::borsh::{ BorshDeserialize, BorshSchema, BorshSerialize, BorshSerializeExt, }; -use namada_core::types::address::Address; -use namada_core::types::ethereum_structs::EthBridgeEvent; -use namada_core::types::hash::Hash; -use namada_core::types::ibc::IbcEvent; -use namada_core::types::storage; +use namada_core::ethereum_structs::EthBridgeEvent; +use namada_core::hash::Hash; +use namada_core::ibc::IbcEvent; +use namada_core::storage; use namada_gas::{Gas, VpsGas}; use num_derive::{FromPrimitive, ToPrimitive}; use num_traits::{FromPrimitive, ToPrimitive}; @@ -341,10 +341,10 @@ impl TxSentinel { #[cfg(test)] mod test_process_tx { use assert_matches::assert_matches; - use 
namada_core::types::address::nam; - use namada_core::types::key::*; - use namada_core::types::storage::Epoch; - use namada_core::types::token::{Amount, DenominatedAmount}; + use namada_core::address::testing::nam; + use namada_core::key::*; + use namada_core::storage::Epoch; + use namada_core::token::{Amount, DenominatedAmount}; use super::*; use crate::{Code, Data, Section, Signature, Tx, TxError}; @@ -505,7 +505,7 @@ fn test_process_tx_decrypted_unsigned() { /// signature #[test] fn test_process_tx_decrypted_signed() { - use namada_core::types::key::*; + use namada_core::key::*; use crate::{Code, Data, Section, Signature, Tx}; @@ -517,7 +517,7 @@ fn test_process_tx_decrypted_signed() { ed25519::SigScheme::generate(&mut rng).try_to_sk().unwrap() } - use namada_core::types::key::Signature as S; + use namada_core::key::Signature as S; let mut decrypted = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); // Invalid signed data diff --git a/crates/tx/src/data/pgf.rs b/crates/tx/src/data/pgf.rs index 284b930dd1..5343ae4daa 100644 --- a/crates/tx/src/data/pgf.rs +++ b/crates/tx/src/data/pgf.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; +use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; +use namada_core::dec::Dec; use serde::{Deserialize, Serialize}; use thiserror::Error; @@ -33,8 +33,8 @@ pub struct UpdateStewardCommission { #[cfg(any(test, feature = "testing"))] /// Tests and strategies for PGF pub mod tests { - use namada_core::types::address::testing::arb_non_internal_address; - use namada_core::types::dec::testing::arb_dec; + use namada_core::address::testing::arb_non_internal_address; + use namada_core::dec::testing::arb_dec; use proptest::{collection, prop_compose}; use super::UpdateStewardCommission; diff --git a/crates/tx/src/data/pos.rs b/crates/tx/src/data/pos.rs index 73d42d997e..3805556780 100644 --- 
a/crates/tx/src/data/pos.rs +++ b/crates/tx/src/data/pos.rs @@ -1,10 +1,10 @@ //! Types used for PoS system transactions +use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use namada_core::types::address::Address; -use namada_core::types::dec::Dec; -use namada_core::types::key::{common, secp256k1}; -use namada_core::types::token; +use namada_core::dec::Dec; +use namada_core::key::{common, secp256k1}; +use namada_core::token; use serde::{Deserialize, Serialize}; /// A tx data type to become a validator account. @@ -214,10 +214,10 @@ pub struct ConsensusKeyChange { #[cfg(any(test, feature = "testing"))] /// Tests and strategies for proof-of-stake pub mod tests { - use namada_core::types::address::testing::arb_non_internal_address; - use namada_core::types::dec::testing::arb_dec; - use namada_core::types::key::testing::{arb_common_pk, arb_pk}; - use namada_core::types::token::testing::arb_amount; + use namada_core::address::testing::arb_non_internal_address; + use namada_core::dec::testing::arb_dec; + use namada_core::key::testing::{arb_common_pk, arb_pk}; + use namada_core::token::testing::arb_amount; use proptest::{option, prop_compose}; use super::*; diff --git a/crates/tx/src/data/protocol.rs b/crates/tx/src/data/protocol.rs index b1ce5892fa..f9244e62d2 100644 --- a/crates/tx/src/data/protocol.rs +++ b/crates/tx/src/data/protocol.rs @@ -4,7 +4,7 @@ use namada_core::borsh::{ BorshDeserialize, BorshSchema, BorshSerialize, BorshSerializeExt, }; -use namada_core::types::key::*; +use namada_core::key::*; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; diff --git a/crates/tx/src/data/wrapper.rs b/crates/tx/src/data/wrapper.rs index a5867ad932..a70ebaa39d 100644 --- a/crates/tx/src/data/wrapper.rs +++ b/crates/tx/src/data/wrapper.rs @@ -8,15 +8,15 @@ pub mod wrapper_tx { pub use ark_bls12_381::Bls12_381 as EllipticCurve; use masp_primitives::transaction::Transaction; + use 
namada_core::address::{Address, MASP}; use namada_core::borsh::{ BorshDeserialize, BorshSchema, BorshSerialize, BorshSerializeExt, }; - use namada_core::types::address::{Address, MASP}; - use namada_core::types::hash::Hash; - use namada_core::types::key::*; - use namada_core::types::storage::Epoch; - use namada_core::types::token::{Amount, DenominatedAmount, Transfer}; - use namada_core::types::uint::Uint; + use namada_core::hash::Hash; + use namada_core::key::*; + use namada_core::storage::Epoch; + use namada_core::token::{Amount, DenominatedAmount, Transfer}; + use namada_core::uint::Uint; use namada_gas::Gas; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; diff --git a/crates/tx/src/lib.rs b/crates/tx/src/lib.rs index 6c69d1f45c..6c866e8513 100644 --- a/crates/tx/src/lib.rs +++ b/crates/tx/src/lib.rs @@ -4,14 +4,60 @@ pub mod data; pub mod proto; mod types; -pub use namada_core::types::key::SignableEthMessage; -pub use namada_core::types::sign::SignatureIndex; +use std::collections::HashMap; + +use data::TxType; +use namada_core::event::{Event, EventLevel, EventType}; +pub use namada_core::key::SignableEthMessage; +pub use namada_core::sign::SignatureIndex; pub use types::{ standalone_signature, verify_standalone_sig, Code, Commitment, CompressedSignature, Data, DecodeError, Header, MaspBuilder, Memo, Section, Signature, Signed, Signer, Tx, TxError, VerifySigError, }; +/// Creates a new event with the hash and height of the transaction +/// already filled in +pub fn new_tx_event(tx: &Tx, height: u64) -> Event { + let mut event = match tx.header().tx_type { + TxType::Wrapper(_) => { + let mut event = Event { + event_type: EventType::Accepted, + level: EventLevel::Tx, + attributes: HashMap::new(), + }; + event["hash"] = tx.header_hash().to_string(); + event + } + TxType::Decrypted(_) => { + let mut event = Event { + event_type: EventType::Applied, + level: EventLevel::Tx, + attributes: HashMap::new(), + }; + event["hash"] = tx + .clone() + 
.update_header(TxType::Raw) + .header_hash() + .to_string(); + event + } + TxType::Protocol(_) => { + let mut event = Event { + event_type: EventType::Applied, + level: EventLevel::Tx, + attributes: HashMap::new(), + }; + event["hash"] = tx.header_hash().to_string(); + event + } + _ => unreachable!(), + }; + event["height"] = height.to_string(); + event["log"] = "".to_string(); + event +} + #[cfg(test)] mod tests { use data_encoding::HEXLOWER; diff --git a/crates/tx/src/types.rs b/crates/tx/src/types.rs index 432e217e29..9533209425 100644 --- a/crates/tx/src/types.rs +++ b/crates/tx/src/types.rs @@ -1,7 +1,6 @@ use std::borrow::Cow; use std::cmp::Ordering; use std::collections::{BTreeMap, HashMap, HashSet}; -use std::convert::TryFrom; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; @@ -10,18 +9,18 @@ use masp_primitives::transaction::builder::Builder; use masp_primitives::transaction::components::sapling::builder::SaplingMetadata; use masp_primitives::transaction::Transaction; use masp_primitives::zip32::ExtendedFullViewingKey; +use namada_core::account::AccountPublicKeysMap; +use namada_core::address::Address; use namada_core::borsh::schema::{add_definition, Declaration, Definition}; use namada_core::borsh::{ BorshDeserialize, BorshSchema, BorshSerialize, BorshSerializeExt, }; -use namada_core::types::account::AccountPublicKeysMap; -use namada_core::types::address::Address; -use namada_core::types::chain::ChainId; -use namada_core::types::key::*; -use namada_core::types::masp::AssetData; -use namada_core::types::sign::SignatureIndex; -use namada_core::types::storage::Epoch; -use namada_core::types::time::DateTimeUtc; +use namada_core::chain::ChainId; +use namada_core::key::*; +use namada_core::masp::AssetData; +use namada_core::sign::SignatureIndex; +use namada_core::storage::Epoch; +use namada_core::time::DateTimeUtc; use serde::de::Error as SerdeError; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; @@ -36,7 +35,7 @@ use 
crate::proto; #[derive(Error, Debug)] pub enum VerifySigError { #[error("{0}")] - VerifySig(#[from] namada_core::types::key::VerifySigError), + VerifySig(#[from] namada_core::key::VerifySigError), #[error("{0}")] Gas(#[from] namada_gas::Error), #[error("The wrapper signature is invalid.")] @@ -249,7 +248,7 @@ pub struct CommitmentError; )] pub enum Commitment { /// Result of applying hash function to bytes - Hash(namada_core::types::hash::Hash), + Hash(namada_core::hash::Hash), /// Result of applying identity function to bytes Id(Vec), } @@ -279,7 +278,7 @@ impl Commitment { } /// Return the contained hash commitment - pub fn hash(&self) -> namada_core::types::hash::Hash { + pub fn hash(&self) -> namada_core::hash::Hash { match self { Self::Id(code) => hash_tx(code), Self::Hash(hash) => *hash, @@ -327,7 +326,7 @@ impl Code { /// Make a new code section with the given hash pub fn from_hash( - hash: namada_core::types::hash::Hash, + hash: namada_core::hash::Hash, tag: Option, ) -> Self { Self { @@ -377,7 +376,7 @@ pub enum Signer { )] pub struct Signature { /// The hash of the section being signed - pub targets: Vec, + pub targets: Vec, /// The public keys against which the signatures should be verified pub signer: Signer, /// The signature over the above hash @@ -387,7 +386,7 @@ pub struct Signature { impl Signature { /// Sign the given section hash with the given key and return a section pub fn new( - targets: Vec, + targets: Vec, secret_keys: BTreeMap, signer: Option
, ) -> Self { @@ -437,13 +436,13 @@ impl Signature { } /// Get the hash of this section - pub fn get_hash(&self) -> namada_core::types::hash::Hash { - namada_core::types::hash::Hash( + pub fn get_hash(&self) -> namada_core::hash::Hash { + namada_core::hash::Hash( self.hash(&mut Sha256::new()).finalize_reset().into(), ) } - pub fn get_raw_hash(&self) -> namada_core::types::hash::Hash { + pub fn get_raw_hash(&self) -> namada_core::hash::Hash { Self { signer: Signer::PubKeys(vec![]), signatures: BTreeMap::new(), @@ -657,7 +656,7 @@ impl From for Vec { )] pub struct MaspBuilder { /// The MASP transaction that this section witnesses - pub target: namada_core::types::hash::Hash, + pub target: namada_core::hash::Hash, /// The decoded set of asset types used by the transaction. Useful for /// offline wallets trying to display AssetTypes. pub asset_types: HashSet, @@ -757,8 +756,8 @@ impl Section { } /// Get the hash of this section - pub fn get_hash(&self) -> namada_core::types::hash::Hash { - namada_core::types::hash::Hash( + pub fn get_hash(&self) -> namada_core::hash::Hash { + namada_core::hash::Hash( self.hash(&mut Sha256::new()).finalize_reset().into(), ) } @@ -864,14 +863,14 @@ pub struct Header { /// A transaction timestamp pub timestamp: DateTimeUtc, /// The SHA-256 hash of the transaction's code section - pub code_hash: namada_core::types::hash::Hash, + pub code_hash: namada_core::hash::Hash, /// The SHA-256 hash of the transaction's data section - pub data_hash: namada_core::types::hash::Hash, + pub data_hash: namada_core::hash::Hash, /// The SHA-256 hash of the transaction's memo section /// /// In case a memo is not present in the transaction, a /// byte array filled with zeroes is present instead - pub memo_hash: namada_core::types::hash::Hash, + pub memo_hash: namada_core::hash::Hash, /// The type of this transaction pub tx_type: TxType, } @@ -884,9 +883,9 @@ impl Header { chain_id: ChainId::default(), expiration: None, timestamp: DateTimeUtc::now(), - 
code_hash: namada_core::types::hash::Hash::default(), - data_hash: namada_core::types::hash::Hash::default(), - memo_hash: namada_core::types::hash::Hash::default(), + code_hash: namada_core::hash::Hash::default(), + data_hash: namada_core::hash::Hash::default(), + memo_hash: namada_core::hash::Hash::default(), } } @@ -1024,12 +1023,12 @@ impl Tx { } /// Get the transaction header hash - pub fn header_hash(&self) -> namada_core::types::hash::Hash { + pub fn header_hash(&self) -> namada_core::hash::Hash { Section::Header(self.header.clone()).get_hash() } /// Gets the hash of the decrypted transaction's header - pub fn raw_header_hash(&self) -> namada_core::types::hash::Hash { + pub fn raw_header_hash(&self) -> namada_core::hash::Hash { let mut raw_header = self.header(); raw_header.tx_type = TxType::Raw; @@ -1037,7 +1036,7 @@ impl Tx { } /// Get hashes of all the sections in this transaction - pub fn sechashes(&self) -> Vec { + pub fn sechashes(&self) -> Vec { let mut hashes = vec![self.header_hash()]; for sec in &self.sections { hashes.push(sec.get_hash()); @@ -1054,7 +1053,7 @@ impl Tx { /// Get the transaction section with the given hash pub fn get_section( &self, - hash: &namada_core::types::hash::Hash, + hash: &namada_core::hash::Hash, ) -> Option> { if self.header_hash() == *hash { return Some(Cow::Owned(Section::Header(self.header.clone()))); @@ -1072,18 +1071,18 @@ impl Tx { } /// Set the transaction memo hash stored in the header - pub fn set_memo_sechash(&mut self, hash: namada_core::types::hash::Hash) { + pub fn set_memo_sechash(&mut self, hash: namada_core::hash::Hash) { self.header.memo_hash = hash; } /// Get the hash of this transaction's memo from the heeader - pub fn memo_sechash(&self) -> &namada_core::types::hash::Hash { + pub fn memo_sechash(&self) -> &namada_core::hash::Hash { &self.header.memo_hash } /// Get the memo designated by the memo hash in the header pub fn memo(&self) -> Option> { - if self.memo_sechash() == 
&namada_core::types::hash::Hash::default() { + if self.memo_sechash() == &namada_core::hash::Hash::default() { return None; } match self @@ -1103,12 +1102,12 @@ impl Tx { } /// Get the hash of this transaction's code from the heeader - pub fn code_sechash(&self) -> &namada_core::types::hash::Hash { + pub fn code_sechash(&self) -> &namada_core::hash::Hash { &self.header.code_hash } /// Set the transaction code hash stored in the header - pub fn set_code_sechash(&mut self, hash: namada_core::types::hash::Hash) { + pub fn set_code_sechash(&mut self, hash: namada_core::hash::Hash) { self.header.code_hash = hash } @@ -1133,12 +1132,12 @@ impl Tx { } /// Get the transaction data hash stored in the header - pub fn data_sechash(&self) -> &namada_core::types::hash::Hash { + pub fn data_sechash(&self) -> &namada_core::hash::Hash { &self.header.data_hash } /// Set the transaction data hash stored in the header - pub fn set_data_sechash(&mut self, hash: namada_core::types::hash::Hash) { + pub fn set_data_sechash(&mut self, hash: namada_core::hash::Hash) { self.header.data_hash = hash } @@ -1179,7 +1178,7 @@ impl Tx { /// public key pub fn verify_signatures( &self, - hashes: &[namada_core::types::hash::Hash], + hashes: &[namada_core::hash::Hash], public_keys_index_map: AccountPublicKeysMap, signer: &Option
, threshold: u8, @@ -1252,11 +1251,11 @@ impl Tx { pub fn verify_signature( &self, public_key: &common::PublicKey, - hashes: &[namada_core::types::hash::Hash], + hashes: &[namada_core::hash::Hash], ) -> Result<&Signature, VerifySigError> { self.verify_signatures( hashes, - AccountPublicKeysMap::from_iter([public_key.clone()].into_iter()), + AccountPublicKeysMap::from_iter([public_key.clone()]), &None, 1, None, @@ -1390,9 +1389,9 @@ impl Tx { /// Add an extra section to the tx builder by hash pub fn add_extra_section_from_hash( &mut self, - hash: namada_core::types::hash::Hash, + hash: namada_core::hash::Hash, tag: Option, - ) -> namada_core::types::hash::Hash { + ) -> namada_core::hash::Hash { let sechash = self .add_section(Section::ExtraData(Code::from_hash(hash, tag))) .get_hash(); @@ -1404,7 +1403,7 @@ impl Tx { &mut self, code: Vec, tag: Option, - ) -> (&mut Self, namada_core::types::hash::Hash) { + ) -> (&mut Self, namada_core::hash::Hash) { let sechash = self .add_section(Section::ExtraData(Code::new(code, tag))) .get_hash(); @@ -1415,7 +1414,7 @@ impl Tx { pub fn add_memo( &mut self, memo: &[u8], - ) -> (&mut Self, namada_core::types::hash::Hash) { + ) -> (&mut Self, namada_core::hash::Hash) { let sechash = self .add_section(Section::ExtraData(Code::new(memo.to_vec(), None))) .get_hash(); @@ -1427,7 +1426,7 @@ impl Tx { pub fn add_masp_tx_section( &mut self, tx: Transaction, - ) -> (&mut Self, namada_core::types::hash::Hash) { + ) -> (&mut Self, namada_core::hash::Hash) { let sechash = self.add_section(Section::MaspTx(tx)).get_hash(); (self, sechash) } @@ -1441,7 +1440,7 @@ impl Tx { /// Add wasm code to the tx builder from hash pub fn add_code_from_hash( &mut self, - code_hash: namada_core::types::hash::Hash, + code_hash: namada_core::hash::Hash, tag: Option, ) -> &mut Self { self.set_code(Code::from_hash(code_hash, tag)); @@ -1478,7 +1477,7 @@ impl Tx { fee_payer: common::PublicKey, epoch: Epoch, gas_limit: GasLimit, - fee_unshield_hash: Option, + 
fee_unshield_hash: Option, ) -> &mut Self { self.header.tx_type = TxType::Wrapper(Box::new(WrapperTx::new( fee, @@ -1515,7 +1514,7 @@ impl Tx { let secret_keys = if signer.is_some() { account_public_keys_map.index_secret_keys(keypairs) } else { - (0..).zip(keypairs.into_iter()).collect() + (0..).zip(keypairs).collect() }; self.add_section(Section::Signature(Signature::new( diff --git a/crates/tx_env/src/lib.rs b/crates/tx_env/src/lib.rs index ce20545986..00601693cb 100644 --- a/crates/tx_env/src/lib.rs +++ b/crates/tx_env/src/lib.rs @@ -1,10 +1,10 @@ //! Transaction environment contains functions that can be called from //! inside a tx. +use namada_core::address::Address; use namada_core::borsh::BorshSerialize; -use namada_core::types::address::Address; -use namada_core::types::ibc::IbcEvent; -use namada_core::types::storage; +use namada_core::ibc::IbcEvent; +use namada_core::storage; use namada_storage::{Result, StorageRead, StorageWrite}; /// Transaction host functions diff --git a/crates/tx_prelude/src/ibc.rs b/crates/tx_prelude/src/ibc.rs index 0857875382..9ae6f732bb 100644 --- a/crates/tx_prelude/src/ibc.rs +++ b/crates/tx_prelude/src/ibc.rs @@ -3,9 +3,9 @@ use std::cell::RefCell; use std::rc::Rc; -use namada_core::types::address::{Address, InternalAddress}; -pub use namada_core::types::ibc::{IbcEvent, IbcShieldedTransfer}; -use namada_core::types::token::DenominatedAmount; +use namada_core::address::{Address, InternalAddress}; +pub use namada_core::ibc::{IbcEvent, IbcShieldedTransfer}; +use namada_core::token::DenominatedAmount; pub use namada_ibc::storage::is_ibc_key; pub use namada_ibc::{ IbcActions, IbcCommonContext, IbcStorageContext, ProofSpec, TransferModule, diff --git a/crates/tx_prelude/src/key.rs b/crates/tx_prelude/src/key.rs index a273d25cc7..7f499b8254 100644 --- a/crates/tx_prelude/src/key.rs +++ b/crates/tx_prelude/src/key.rs @@ -1,6 +1,6 @@ //! 
Cryptographic signature keys -pub use namada_core::types::key::*; +pub use namada_core::key::*; use super::*; diff --git a/crates/tx_prelude/src/lib.rs b/crates/tx_prelude/src/lib.rs index 1c7b112a38..7f3311aa92 100644 --- a/crates/tx_prelude/src/lib.rs +++ b/crates/tx_prelude/src/lib.rs @@ -17,21 +17,20 @@ use core::slice; use std::marker::PhantomData; use masp_primitives::transaction::Transaction; +use namada_core::account::AccountPublicKeysMap; +pub use namada_core::address::Address; pub use namada_core::borsh::{ BorshDeserialize, BorshSerialize, BorshSerializeExt, }; -pub use namada_core::ledger::eth_bridge; -use namada_core::types::account::AccountPublicKeysMap; -pub use namada_core::types::address::Address; -use namada_core::types::chain::CHAIN_ID_LENGTH; -pub use namada_core::types::ethereum_events::EthAddress; -use namada_core::types::internal::HostEnvResult; -use namada_core::types::key::common; -use namada_core::types::storage::TxIndex; -pub use namada_core::types::storage::{ +use namada_core::chain::CHAIN_ID_LENGTH; +pub use namada_core::ethereum_events::EthAddress; +use namada_core::internal::HostEnvResult; +use namada_core::key::common; +use namada_core::storage::TxIndex; +pub use namada_core::storage::{ self, BlockHash, BlockHeight, Epoch, Header, BLOCK_HASH_LENGTH, }; -pub use namada_core::types::{encode, eth_bridge_pool, *}; +pub use namada_core::{encode, eth_bridge_pool, *}; pub use namada_governance::storage as gov_storage; pub use namada_macros::transaction; pub use namada_parameters::storage as parameters_storage; @@ -161,9 +160,7 @@ impl StorageRead for Ctx { } } - fn get_block_hash( - &self, - ) -> Result { + fn get_block_hash(&self) -> Result { let result = Vec::with_capacity(BLOCK_HASH_LENGTH); unsafe { namada_tx_get_block_hash(result.as_ptr() as _); @@ -174,22 +171,17 @@ impl StorageRead for Ctx { Ok(BlockHash::try_from(slice).expect("Cannot convert the hash")) } - fn get_block_epoch( - &self, - ) -> Result { + fn get_block_epoch(&self) -> 
Result { Ok(Epoch(unsafe { namada_tx_get_block_epoch() })) } - fn get_pred_epochs( - &self, - ) -> Result { + fn get_pred_epochs(&self) -> Result { let read_result = unsafe { namada_tx_get_pred_epochs() }; let bytes = read_from_buffer(read_result, namada_tx_result_buffer) .ok_or(Error::SimpleMessage( "Missing result from `namada_tx_get_pred_epochs` call", ))?; - Ok(namada_core::types::decode(bytes) - .expect("Cannot decode pred epochs")) + Ok(namada_core::decode(bytes).expect("Cannot decode pred epochs")) } /// Get the native token address diff --git a/crates/tx_prelude/src/proof_of_stake.rs b/crates/tx_prelude/src/proof_of_stake.rs index 50ad3daf73..8e372722c7 100644 --- a/crates/tx_prelude/src/proof_of_stake.rs +++ b/crates/tx_prelude/src/proof_of_stake.rs @@ -1,8 +1,7 @@ //! Proof of Stake system integration with functions for transactions -use namada_core::types::dec::Dec; -use namada_core::types::key::common; -use namada_core::types::{key, token}; +use namada_core::dec::Dec; +use namada_core::{key, token}; pub use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::storage::read_pos_params; use namada_proof_of_stake::types::{ResultSlashing, ValidatorMetaData}; diff --git a/crates/tx_prelude/src/token.rs b/crates/tx_prelude/src/token.rs index 88371c9864..a42794e8bb 100644 --- a/crates/tx_prelude/src/token.rs +++ b/crates/tx_prelude/src/token.rs @@ -1,4 +1,4 @@ -use namada_core::types::address::Address; +use namada_core::address::Address; use namada_proof_of_stake::token::storage_key::{ balance_key, minted_balance_key, minter_key, }; diff --git a/crates/vm_env/src/lib.rs b/crates/vm_env/src/lib.rs index 36306ec387..14f31e2167 100644 --- a/crates/vm_env/src/lib.rs +++ b/crates/vm_env/src/lib.rs @@ -8,7 +8,7 @@ use std::mem::ManuallyDrop; use borsh::BorshDeserialize; -use namada_core::types::internal::{HostEnvResult, KeyVal}; +use namada_core::internal::{HostEnvResult, KeyVal}; /// Transaction environment imports pub mod tx { diff --git 
a/crates/vote_ext/src/bridge_pool_roots.rs b/crates/vote_ext/src/bridge_pool_roots.rs index 2db9dbdfca..41b1f85b54 100644 --- a/crates/vote_ext/src/bridge_pool_roots.rs +++ b/crates/vote_ext/src/bridge_pool_roots.rs @@ -5,11 +5,11 @@ use std::collections::HashSet; use std::ops::{Deref, DerefMut}; +use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use namada_core::types::address::Address; -use namada_core::types::key::common; -use namada_core::types::key::common::Signature; -use namada_core::types::storage::BlockHeight; +use namada_core::key::common; +use namada_core::key::common::Signature; +use namada_core::storage::BlockHeight; use namada_tx::Signed; /// A vote extension containing a validator's signature diff --git a/crates/vote_ext/src/ethereum_events.rs b/crates/vote_ext/src/ethereum_events.rs index 35733dcdc9..05fa0c3b3a 100644 --- a/crates/vote_ext/src/ethereum_events.rs +++ b/crates/vote_ext/src/ethereum_events.rs @@ -4,11 +4,11 @@ use std::collections::{BTreeSet, HashMap}; use std::ops::Deref; +use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use namada_core::types::address::Address; -use namada_core::types::ethereum_events::EthereumEvent; -use namada_core::types::key::common::{self, Signature}; -use namada_core::types::storage::BlockHeight; +use namada_core::ethereum_events::EthereumEvent; +use namada_core::key::common::{self, Signature}; +use namada_core::storage::BlockHeight; use namada_tx::Signed; /// Type alias for an [`EthereumEventsVext`]. 
@@ -153,11 +153,9 @@ impl VextDigest { #[cfg(test)] mod tests { - use namada_core::types::address::{self, Address}; - use namada_core::types::ethereum_events::{EthereumEvent, Uint}; - use namada_core::types::hash::Hash; - use namada_core::types::key; - use namada_tx::Signed; + use namada_core::ethereum_events::Uint; + use namada_core::hash::Hash; + use namada_core::{address, key}; use super::*; diff --git a/crates/vote_ext/src/lib.rs b/crates/vote_ext/src/lib.rs index ca49e96801..72fdbb21cd 100644 --- a/crates/vote_ext/src/lib.rs +++ b/crates/vote_ext/src/lib.rs @@ -7,8 +7,8 @@ pub mod validator_set_update; use namada_core::borsh::{ BorshDeserialize, BorshSchema, BorshSerialize, BorshSerializeExt, }; -use namada_core::types::chain::ChainId; -use namada_core::types::key::common; +use namada_core::chain::ChainId; +use namada_core::key::common; use namada_tx::data::protocol::{ProtocolTx, ProtocolTxType}; use namada_tx::data::TxType; use namada_tx::{Signature, Signed, Tx, TxError}; @@ -113,15 +113,15 @@ impl TryFrom<&Tx> for EthereumTxData { fn try_from(tx: &Tx) -> Result { let TxType::Protocol(protocol_tx) = tx.header().tx_type else { - return Err(TxError::Deserialization( - "Expected protocol tx type".into(), - )); - }; + return Err(TxError::Deserialization( + "Expected protocol tx type".into(), + )); + }; let Some(tx_data) = tx.data() else { - return Err(TxError::Deserialization( - "Expected protocol tx type associated data".into(), - )); - }; + return Err(TxError::Deserialization( + "Expected protocol tx type associated data".into(), + )); + }; Self::deserialize(&protocol_tx.tx, &tx_data) } } diff --git a/crates/vote_ext/src/validator_set_update.rs b/crates/vote_ext/src/validator_set_update.rs index 6e4a181319..7c997f39a1 100644 --- a/crates/vote_ext/src/validator_set_update.rs +++ b/crates/vote_ext/src/validator_set_update.rs @@ -4,17 +4,15 @@ use std::cmp::Ordering; use std::collections::HashMap; use std::ops::Deref; +use namada_core::address::Address; use 
namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use namada_core::types::address::Address; -use namada_core::types::eth_abi::{AbiEncode, Encode, Token}; -use namada_core::types::ethereum_events::EthAddress; -use namada_core::types::keccak::KeccakHash; -use namada_core::types::key::common::{self, Signature}; -use namada_core::types::storage::Epoch; -use namada_core::types::voting_power::{ - EthBridgeVotingPower, FractionalVotingPower, -}; -use namada_core::types::{ethereum_structs, token}; +use namada_core::eth_abi::{AbiEncode, Encode, Token}; +use namada_core::ethereum_events::EthAddress; +use namada_core::keccak::KeccakHash; +use namada_core::key::common::{self, Signature}; +use namada_core::storage::Epoch; +use namada_core::voting_power::{EthBridgeVotingPower, FractionalVotingPower}; +use namada_core::{ethereum_structs, token}; use namada_tx::Signed; // the contract versions and namespaces plugged into validator set hashes @@ -357,7 +355,7 @@ impl From for ethereum_structs::ValidatorSetArgs { ethereum_structs::ValidatorSetArgs { validator_set: validators .into_iter() - .zip(voting_powers.into_iter()) + .zip(voting_powers) .map(|(addr, power)| encode_validator_data(addr, power)) .collect(), nonce: epoch.0.into(), @@ -384,10 +382,10 @@ impl Encode<1> for ValidatorSetArgs { // this is only here so we don't pollute the // outer namespace with serde traits mod tag { - use namada_core::types::eth_abi::{AbiEncode, Encode, Token}; - use namada_core::types::hash::KeccakHasher; - use namada_core::types::keccak::KeccakHash; - use namada_core::types::key::Signable; + use namada_core::eth_abi::{AbiEncode, Encode, Token}; + use namada_core::hash::KeccakHasher; + use namada_core::keccak::KeccakHash; + use namada_core::key::Signable; use serde::{Deserialize, Serialize}; use super::{ @@ -428,7 +426,6 @@ mod tests { use std::str::FromStr; use data_encoding::HEXLOWER; - use namada_core::types::ethereum_events::EthAddress; use super::*; diff --git 
a/crates/vp_env/src/collection_validation/lazy_map.rs b/crates/vp_env/src/collection_validation/lazy_map.rs index c1fd0949fa..80b83d4cd3 100644 --- a/crates/vp_env/src/collection_validation/lazy_map.rs +++ b/crates/vp_env/src/collection_validation/lazy_map.rs @@ -5,7 +5,7 @@ use core::hash::Hash; use std::collections::HashMap; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::storage; +use namada_core::storage; use namada_storage::collections::lazy_map::{LazyMap, NestedSubKey, SubKey}; use namada_storage::collections::{Nested, Simple}; diff --git a/crates/vp_env/src/collection_validation/lazy_set.rs b/crates/vp_env/src/collection_validation/lazy_set.rs index bfa5828b4a..bc450b6a21 100644 --- a/crates/vp_env/src/collection_validation/lazy_set.rs +++ b/crates/vp_env/src/collection_validation/lazy_set.rs @@ -2,7 +2,7 @@ use std::fmt::Debug; -use namada_core::types::storage; +use namada_core::storage; use namada_storage::collections::lazy_set::{LazySet, SubKey}; use super::LazyCollectionExt; diff --git a/crates/vp_env/src/collection_validation/lazy_vec.rs b/crates/vp_env/src/collection_validation/lazy_vec.rs index 9382c21f22..f6af86c805 100644 --- a/crates/vp_env/src/collection_validation/lazy_vec.rs +++ b/crates/vp_env/src/collection_validation/lazy_vec.rs @@ -4,7 +4,7 @@ use std::collections::BTreeSet; use std::fmt::Debug; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::types::storage; +use namada_core::storage; use namada_storage::collections::lazy_vec::{ Index, LazyVec, SubKey, ValidationError, }; diff --git a/crates/vp_env/src/collection_validation/mod.rs b/crates/vp_env/src/collection_validation/mod.rs index d74053fb6c..a1ba83a683 100644 --- a/crates/vp_env/src/collection_validation/mod.rs +++ b/crates/vp_env/src/collection_validation/mod.rs @@ -8,7 +8,7 @@ use std::fmt::Debug; use derivative::Derivative; use namada_core::borsh::BorshDeserialize; -use namada_core::types::storage; +use 
namada_core::storage; use namada_storage::collections::LazyCollection; use crate::VpEnv; diff --git a/crates/vp_env/src/lib.rs b/crates/vp_env/src/lib.rs index 9009f72b39..3cbc515343 100644 --- a/crates/vp_env/src/lib.rs +++ b/crates/vp_env/src/lib.rs @@ -5,16 +5,16 @@ pub mod collection_validation; // TODO: this should be re-exported from namada_shielded_token use masp_primitives::transaction::Transaction; +use namada_core::address::Address; use namada_core::borsh::BorshDeserialize; -use namada_core::types::address::Address; -use namada_core::types::hash::Hash; -use namada_core::types::ibc::{ +use namada_core::hash::Hash; +use namada_core::ibc::{ get_shielded_transfer, IbcEvent, MsgShieldedTransfer, EVENT_TYPE_PACKET, }; -use namada_core::types::storage::{ +use namada_core::storage::{ BlockHash, BlockHeight, Epoch, Epochs, Header, Key, TxIndex, }; -use namada_core::types::token::Transfer; +use namada_core::token::Transfer; use namada_storage::{OptionExt, ResultExt, StorageRead}; use namada_tx::Tx; diff --git a/crates/vp_prelude/src/lib.rs b/crates/vp_prelude/src/lib.rs index 0f57f495c4..098d328e26 100644 --- a/crates/vp_prelude/src/lib.rs +++ b/crates/vp_prelude/src/lib.rs @@ -7,28 +7,26 @@ #![deny(rustdoc::private_intra_doc_links)] pub mod ibc { - pub use namada_core::types::ibc::IbcEvent; + pub use namada_core::ibc::IbcEvent; pub use namada_ibc::storage::is_ibc_key; } // used in the VP input -use core::convert::AsRef; use core::slice; pub use std::collections::{BTreeSet, HashSet}; -use std::convert::TryFrom; use std::marker::PhantomData; +pub use namada_core::address::Address; pub use namada_core::borsh::{ BorshDeserialize, BorshSerialize, BorshSerializeExt, }; -pub use namada_core::types::address::Address; -use namada_core::types::chain::CHAIN_ID_LENGTH; -use namada_core::types::hash::{Hash, HASH_LENGTH}; -use namada_core::types::internal::HostEnvResult; -use namada_core::types::storage::{ +use namada_core::chain::CHAIN_ID_LENGTH; +use namada_core::hash::{Hash, 
HASH_LENGTH}; +use namada_core::internal::HostEnvResult; +use namada_core::storage::{ BlockHash, BlockHeight, Epoch, Epochs, Header, TxIndex, BLOCK_HASH_LENGTH, }; -pub use namada_core::types::*; +pub use namada_core::*; pub use namada_governance::pgf::storage as pgf_storage; pub use namada_governance::storage as gov_storage; pub use namada_macros::validity_predicate; @@ -551,7 +549,7 @@ fn get_pred_epochs() -> Result { "Missing result from `namada_vp_get_pred_epochs` call", ), )?; - Ok(namada_core::types::decode(bytes).expect("Cannot decode pred epochs")) + Ok(namada_core::decode(bytes).expect("Cannot decode pred epochs")) } fn get_native_token() -> Result { diff --git a/docker/namada-wasm/Dockerfile b/docker/namada-wasm/Dockerfile index b6422340bd..1749c1f85b 100644 --- a/docker/namada-wasm/Dockerfile +++ b/docker/namada-wasm/Dockerfile @@ -1,12 +1,12 @@ # This docker is used for deterministic wasm builds # The version should be matching the version set in wasm/rust-toolchain.toml -FROM rust:1.70.0-bullseye +FROM rust:1.76.0-bullseye WORKDIR /__w/namada/namada # The version should be matching the version set above -RUN rustup toolchain install 1.70.0 --profile minimal +RUN rustup toolchain install 1.76.0 --profile minimal RUN rustup target add wasm32-unknown-unknown RUN apt-get update && apt-get install -y \ diff --git a/docker/namada/Dockerfile b/docker/namada/Dockerfile index 6563de29e8..5354628779 100644 --- a/docker/namada/Dockerfile +++ b/docker/namada/Dockerfile @@ -1,4 +1,4 @@ -FROM lukemathwalker/cargo-chef:latest-rust-1.70.0 AS chef +FROM lukemathwalker/cargo-chef:latest-rust-1.76.0 AS chef WORKDIR /app FROM chef AS planner @@ -29,7 +29,7 @@ WORKDIR /app RUN git clone -b v0.37.2 --single-branch https://github.com/cometbft/cometbft.git && cd cometbft && make build -FROM debian:bookworm-slim AS runtime +FROM debian:bullseye-slim AS runtime ENV NAMADA_LOG_COLOR=false RUN apt-get update && apt-get install libcurl4-openssl-dev libudev-dev -y && apt-get clean 
diff --git a/examples/Cargo.toml b/examples/Cargo.toml index fb2da370fa..278e2b444d 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -19,6 +19,7 @@ path = "generate_txs.rs" [dev-dependencies] masp_proofs = { workspace = true, default-features = false, features = ["local-prover", "download-params"] } namada_sdk = { path = "../crates/sdk", default-features = false, features = ["namada-sdk", "std", "testing"] } +data-encoding.workspace = true proptest.workspace = true serde_json.workspace = true tokio = {workspace = true, default-features = false} diff --git a/examples/generate_txs.rs b/examples/generate_txs.rs index 384f092900..afd8ec4346 100644 --- a/examples/generate_txs.rs +++ b/examples/generate_txs.rs @@ -1,7 +1,9 @@ +use std::collections::BTreeMap; use std::path::PathBuf; +use data_encoding::HEXLOWER; use namada_sdk::signing::to_ledger_vector; -use namada_sdk::testing::arb_tx; +use namada_sdk::testing::arb_signed_tx; use namada_sdk::wallet::fs::FsWalletUtils; use proptest::strategy::{Strategy, ValueTree}; use proptest::test_runner::{Reason, TestRunner}; @@ -13,13 +15,19 @@ async fn main() -> Result<(), Reason> { let mut debug_vectors = vec![]; let mut test_vectors = vec![]; for i in 0..1000 { - let (tx, tx_data) = arb_tx().new_tree(&mut runner)?.current(); + let (tx, tx_data) = arb_signed_tx().new_tree(&mut runner)?.current(); let mut ledger_vector = to_ledger_vector(&wallet, &tx) .await .expect("unable to construct test vector"); + let sechashes = tx.sechashes(); + let mut sechash_map = BTreeMap::new(); + for (idx, sechash) in sechashes.iter().enumerate() { + sechash_map.insert(idx as u8, HEXLOWER.encode(&sechash.0)); + } + sechash_map.insert(0xff, HEXLOWER.encode(&tx.raw_header_hash().0)); ledger_vector.name = format!("{}_{}", i, ledger_vector.name); test_vectors.push(ledger_vector.clone()); - debug_vectors.push((ledger_vector, tx, tx_data)); + debug_vectors.push((ledger_vector, tx, tx_data, sechash_map)); } let args: Vec<_> = 
std::env::args().collect(); if args.len() < 3 { @@ -29,7 +37,7 @@ async fn main() -> Result<(), Reason> { let json = serde_json::to_string(&test_vectors) .expect("unable to serialize test vectors"); std::fs::write(&args[1], json).expect("unable to save test vectors"); - std::fs::write(&args[2], format!("{:?}", debug_vectors)) + std::fs::write(&args[2], format!("{:#?}", debug_vectors)) .expect("unable to save test vectors"); Ok(()) } diff --git a/rust-nightly-version b/rust-nightly-version index df18b6ae37..99d5905d98 100644 --- a/rust-nightly-version +++ b/rust-nightly-version @@ -1 +1 @@ -nightly-2023-06-01 +nightly-2024-02-20 diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 196d6c41ec..f537877896 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "1.70.0" +channel = "1.76.0" components = ["rustc", "cargo", "rust-std", "rust-docs", "rls", "rust-src", "rust-analysis"] targets = ['wasm32-unknown-unknown'] \ No newline at end of file diff --git a/rustfmt.toml b/rustfmt.toml index 6a1590b990..943b8da0f2 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -45,7 +45,7 @@ remove_nested_parens = true reorder_impl_items = true reorder_imports = true reorder_modules = true -required_version = "1.5.2" +required_version = "1.7.0" skip_children = false space_after_colon = true space_before_colon = false diff --git a/test_fixtures/masp_proofs/README.md b/test_fixtures/masp_proofs/README.md deleted file mode 100644 index 1351183cf3..0000000000 --- a/test_fixtures/masp_proofs/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# MASP proofs for tests - -This directory contains pre-built MASP transaction proofs used to speed-up integration tests. - -```shell -# Run the tests with the saved proofs from here. -make test-integration - -# Delete old proofs, run the tests and save the new proofs. 
-make test-integration-save-proofs -``` diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 3d945aa042..40d11066e1 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -50,9 +50,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ "getrandom 0.2.11", "once_cell", @@ -3134,7 +3134,7 @@ dependencies = [ [[package]] name = "masp_note_encryption" version = "1.0.0" -source = "git+https://github.com/anoma/masp?tag=v1.1.0#f24691c0eb76909e3c15ae03aef294dccebd2df3" +source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" dependencies = [ "borsh", "chacha20", @@ -3147,7 +3147,7 @@ dependencies = [ [[package]] name = "masp_primitives" version = "1.0.0" -source = "git+https://github.com/anoma/masp?tag=v1.1.0#f24691c0eb76909e3c15ae03aef294dccebd2df3" +source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" dependencies = [ "aes", "bip0039", @@ -3179,7 +3179,7 @@ dependencies = [ [[package]] name = "masp_proofs" version = "1.0.0" -source = "git+https://github.com/anoma/masp?tag=v1.1.0#f24691c0eb76909e3c15ae03aef294dccebd2df3" +source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" dependencies = [ "bellman", "blake2b_simd", @@ -3316,7 +3316,7 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "namada" -version = "0.31.6" +version = "0.31.7" dependencies = [ "async-trait", "bimap", @@ -3343,6 +3343,7 @@ dependencies = [ "namada_ibc", "namada_parameters", "namada_proof_of_stake", + "namada_replay_protection", "namada_sdk", "namada_state", 
"namada_token", @@ -3389,7 +3390,7 @@ dependencies = [ [[package]] name = "namada_account" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_core", @@ -3401,7 +3402,7 @@ dependencies = [ [[package]] name = "namada_core" -version = "0.31.6" +version = "0.31.7" dependencies = [ "bech32 0.8.1", "borsh", @@ -3419,7 +3420,6 @@ dependencies = [ "k256", "masp_primitives", "namada_macros", - "num-derive", "num-integer", "num-rational 0.4.1", "num-traits", @@ -3427,7 +3427,6 @@ dependencies = [ "num_enum", "primitive-types", "proptest", - "prost 0.12.3", "prost-types 0.12.3", "rand 0.8.5", "rand_core 0.6.4", @@ -3447,7 +3446,7 @@ dependencies = [ [[package]] name = "namada_ethereum_bridge" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "ethabi", @@ -3476,7 +3475,7 @@ dependencies = [ [[package]] name = "namada_gas" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_core", @@ -3486,14 +3485,14 @@ dependencies = [ [[package]] name = "namada_governance" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "itertools 0.10.5", "namada_core", "namada_macros", "namada_parameters", - "namada_state", + "namada_storage", "namada_trans_token", "proptest", "serde", @@ -3504,7 +3503,7 @@ dependencies = [ [[package]] name = "namada_ibc" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "ibc", @@ -3513,11 +3512,12 @@ dependencies = [ "ics23", "masp_primitives", "namada_core", + "namada_gas", "namada_governance", "namada_parameters", "namada_state", "namada_storage", - "namada_trans_token", + "namada_token", "primitive-types", "proptest", "prost 0.12.3", @@ -3528,7 +3528,7 @@ dependencies = [ [[package]] name = "namada_macros" -version = "0.31.6" +version = "0.31.7" dependencies = [ "proc-macro2", "quote", @@ -3537,7 +3537,7 @@ dependencies = [ [[package]] name = "namada_merkle_tree" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "eyre", @@ -3550,7 +3550,7 @@ dependencies = [ 
[[package]] name = "namada_parameters" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_core", @@ -3561,7 +3561,7 @@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "data-encoding", @@ -3580,9 +3580,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "namada_replay_protection" +version = "0.31.7" +dependencies = [ + "namada_core", +] + [[package]] name = "namada_sdk" -version = "0.31.6" +version = "0.31.7" dependencies = [ "async-trait", "bimap", @@ -3642,20 +3649,21 @@ dependencies = [ [[package]] name = "namada_shielded_token" -version = "0.31.6" +version = "0.31.7" dependencies = [ + "borsh", "masp_primitives", "namada_core", "namada_parameters", - "namada_state", "namada_storage", "namada_trans_token", + "serde", "tracing", ] [[package]] name = "namada_state" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "ics23", @@ -3664,6 +3672,7 @@ dependencies = [ "namada_gas", "namada_merkle_tree", "namada_parameters", + "namada_replay_protection", "namada_storage", "namada_trans_token", "namada_tx", @@ -3677,13 +3686,14 @@ dependencies = [ [[package]] name = "namada_storage" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "itertools 0.10.5", "namada_core", "namada_gas", "namada_merkle_tree", + "namada_replay_protection", "namada_tx", "thiserror", "tracing", @@ -3691,7 +3701,7 @@ dependencies = [ [[package]] name = "namada_test_utils" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_core", @@ -3700,7 +3710,7 @@ dependencies = [ [[package]] name = "namada_tests" -version = "0.31.6" +version = "0.31.7" dependencies = [ "async-trait", "chrono", @@ -3734,7 +3744,7 @@ dependencies = [ [[package]] name = "namada_token" -version = "0.31.6" +version = "0.31.7" dependencies = [ "namada_core", "namada_shielded_token", @@ -3744,7 +3754,7 @@ dependencies = [ [[package]] name = "namada_trans_token" -version = "0.31.6" 
+version = "0.31.7" dependencies = [ "namada_core", "namada_storage", @@ -3752,7 +3762,7 @@ dependencies = [ [[package]] name = "namada_tx" -version = "0.31.6" +version = "0.31.7" dependencies = [ "ark-bls12-381", "borsh", @@ -3774,7 +3784,7 @@ dependencies = [ [[package]] name = "namada_tx_env" -version = "0.31.6" +version = "0.31.7" dependencies = [ "namada_core", "namada_storage", @@ -3782,7 +3792,7 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "masp_primitives", @@ -3804,7 +3814,7 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "masp_primitives", @@ -3813,7 +3823,7 @@ dependencies = [ [[package]] name = "namada_vote_ext" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_core", @@ -3823,7 +3833,7 @@ dependencies = [ [[package]] name = "namada_vp_env" -version = "0.31.6" +version = "0.31.7" dependencies = [ "derivative", "masp_primitives", @@ -3835,7 +3845,7 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_account", @@ -3856,7 +3866,7 @@ dependencies = [ [[package]] name = "namada_wasm" -version = "0.31.6" +version = "0.31.7" dependencies = [ "getrandom 0.2.11", "namada", @@ -6227,7 +6237,7 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tx_template" -version = "0.31.6" +version = "0.31.7" dependencies = [ "getrandom 0.2.11", "namada_tests", @@ -6381,7 +6391,7 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vp_template" -version = "0.31.6" +version = "0.31.7" dependencies = [ "getrandom 0.2.11", "namada_tests", diff --git a/wasm/checksums.json b/wasm/checksums.json index a6e21a5f4d..9afc875538 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,26 +1,26 @@ { - "tx_become_validator.wasm": 
"tx_become_validator.05b73a097420725adbff2429818b4d0c05fb5bfbb6e733f309bd5bce6350e595.wasm", - "tx_bond.wasm": "tx_bond.de51f6b2466cf239ecf71770231a3289f33e9d84f6548a5931ef5f8444c77417.wasm", - "tx_bridge_pool.wasm": "tx_bridge_pool.64b1f1b715038d2c03d3b4cf30f541ae1c570772e91702bbad035a56f0563822.wasm", - "tx_change_consensus_key.wasm": "tx_change_consensus_key.cbf20eeb8513714580fe673f29fd72e4e620ac8195c70d0edd4cc29840429deb.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.bf9b619a1be547a5d37ce7b5e581f0439e7ec6c25bb9319dd244082b91cf389e.wasm", - "tx_change_validator_metadata.wasm": "tx_change_validator_metadata.e3fe1ef96646c456b83bbb24312198c625a5586f23cf053a77a9384f79b0d018.wasm", - "tx_claim_rewards.wasm": "tx_claim_rewards.3e7c2e3d2573094bbf50a277cf24119c19c87939f05b00c0ff9dcf485fc099e2.wasm", - "tx_deactivate_validator.wasm": "tx_deactivate_validator.2cfb4efb067e09f91dd306dd1100feec0127a4e49ac056324ac0f82f503892b4.wasm", - "tx_ibc.wasm": "tx_ibc.1322e0d9ef5eab091605512c4315d774cf81e29b6958aefe95d8fa2db3698e5e.wasm", - "tx_init_account.wasm": "tx_init_account.a1c8590838be1bf9cec3bf671173ca977ccd284ee8206fb45926030072ebe3b9.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.e9a439e4d7a485412cb2d19e84e5d0d1364ce35e70d9e04eedd57a3856b50624.wasm", - "tx_reactivate_validator.wasm": "tx_reactivate_validator.530627430b31317dae981fc978074ae05a9dd1e59f5adcdca7ea63f1612b5d59.wasm", - "tx_redelegate.wasm": "tx_redelegate.5b1649a3ebe01fb10c786150b3969ce1472c25dbadfcbe7acbfff38f0f34d659.wasm", - "tx_resign_steward.wasm": "tx_resign_steward.7f28dc66affb71c7b73ab6b4896a6539706faf54b3272d2e46426566463b1c12.wasm", - "tx_reveal_pk.wasm": "tx_reveal_pk.6add10520c8df0a8cc4b90ba129c5eaad9d4b6f5e6dd354460d327e0670a6b7b.wasm", - "tx_transfer.wasm": "tx_transfer.22952b3a0d59a7ea23e1d3113df0fe9d8603717aa0775471e9d4abeecc9d9fda.wasm", - "tx_unbond.wasm": "tx_unbond.c0a957d0fdfb02dbd664b664ac6ebe86331db4c037613c8a6e3d9bf201c86c1c.wasm", - 
"tx_unjail_validator.wasm": "tx_unjail_validator.fd36f13ff4977a509be324853272368bd96908fe58a790ff94e95a0dd084213c.wasm", - "tx_update_account.wasm": "tx_update_account.023cbe9ae47f280e420b41994c852462240da7cc57f781ddff9170621d849f03.wasm", - "tx_update_steward_commission.wasm": "tx_update_steward_commission.eff4d667283e912a02cafde99b75d59a5e4a11bc3c714360ad17f47cfc9a458c.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.440df5cf4cc9c895f9df5207c6a48bd82482dd036642d950e4b3ee290488a3a3.wasm", - "tx_withdraw.wasm": "tx_withdraw.f21cac1c96d351bdd8d976120d4fddbafa7c91b560fd235cc612315c9bae0bb3.wasm", - "vp_implicit.wasm": "vp_implicit.baca52bd432996623c104847d6d734da18bdaab421ae6c2888662f7a0757114c.wasm", - "vp_user.wasm": "vp_user.344097348b5b189e9c973dcca811a9d0632062738dee25eca7e73d98e27a4882.wasm" + "tx_become_validator.wasm": "tx_become_validator.b92b24324a4c673c9582b510f56ec8f0b221f642e77eb8604ed47e692759ae75.wasm", + "tx_bond.wasm": "tx_bond.9ec1978606b7d93b7569d3933646a61f61f873c91cae8ed51f9b8cc9bdfd05fb.wasm", + "tx_bridge_pool.wasm": "tx_bridge_pool.31513d6d21c7c03587be620aed7c0bd3398c8b3ad4ffc6fab475bdb583de6696.wasm", + "tx_change_consensus_key.wasm": "tx_change_consensus_key.3e8b04513e7de75a9cef0df9edf740e7ffc8cbcc9e253da712ffec18fdc83137.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.d74c528c466ce488ea2763d55efc374a5f86ba6b1bfc004563c2a43e949e2108.wasm", + "tx_change_validator_metadata.wasm": "tx_change_validator_metadata.f6018af3d681dab490d9f1a8dc704a228e74bb2696420081e64dce54c925f6ce.wasm", + "tx_claim_rewards.wasm": "tx_claim_rewards.b2755586e2085b39f3e7f7a42fb4c073e009b5344fd728c5a984e71468c659c5.wasm", + "tx_deactivate_validator.wasm": "tx_deactivate_validator.d878e6bd137081b7419d833418392198df461c5622ab06b5626f1515d3fa3986.wasm", + "tx_ibc.wasm": "tx_ibc.b56fe3453d19cc878d823b714d6f290d67de5bf6e3d4d2747427a421d1cd92e9.wasm", + "tx_init_account.wasm": 
"tx_init_account.c3e5493fea9a0173f6a63346b9c68c0e1176076d72e5ae1141fdb9e0621d1f23.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.540c4da9a2d8a8b989b93ff224eefcbc6efc47002245ef1e69ca795b67667720.wasm", + "tx_reactivate_validator.wasm": "tx_reactivate_validator.bc5ff5226fcff06be75881e6e3a8202af9fd11b064c62472a8ae1b47ce92c861.wasm", + "tx_redelegate.wasm": "tx_redelegate.bd5b044ddbee1dfcb89ef646f9785d677c9a62fe5096fed7b5a2ebb277e04df6.wasm", + "tx_resign_steward.wasm": "tx_resign_steward.cf4dadcb754b6a5a0950cd7ebc2d03fbf6a4caf22c2c1959614bacefef2b0098.wasm", + "tx_reveal_pk.wasm": "tx_reveal_pk.ed0e214d19719217b4c66eaa67eb0daf1aa1796ca3462dfffa296fbdfcea3d21.wasm", + "tx_transfer.wasm": "tx_transfer.de85206b9c65e3751b62d60eea9c7e4adf554e4d6292d301373bf92cbdcadcdb.wasm", + "tx_unbond.wasm": "tx_unbond.f7531570afba9cbd99e2a87ae4d377092d5851209dbd9a885827f6f8e5bd4765.wasm", + "tx_unjail_validator.wasm": "tx_unjail_validator.5e31327662fee0ce58b2639c2e99fd2bf179ec862c6a7670d99ea248945804fb.wasm", + "tx_update_account.wasm": "tx_update_account.91017f9fe798d3119f96a46bf26390e48d3825abdcbf4c1c61b91201595f27eb.wasm", + "tx_update_steward_commission.wasm": "tx_update_steward_commission.b8b7d029e36d9a271431067dd58a6288a4d38ede001da94465673b2946800ba4.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.177ffea6e9929f86494debf68e7f5c1a9a85911694dd8472302cf5b77d8cda14.wasm", + "tx_withdraw.wasm": "tx_withdraw.b04be3d5d7a5beed73e880da7107bba9b5e5577834d87355a9bd06dabf718c3a.wasm", + "vp_implicit.wasm": "vp_implicit.2ccb06e8cc7f8d50a4710ee43110d5bb64d402e4c7b98fb7ce0c9272036cde1d.wasm", + "vp_user.wasm": "vp_user.ec7fadc06824c610eca3c7560c88525529068a32fbafc5190639466fbddc727c.wasm" } \ No newline at end of file diff --git a/wasm/rust-toolchain.toml b/wasm/rust-toolchain.toml index 2658985cb0..16235b717f 100644 --- a/wasm/rust-toolchain.toml +++ b/wasm/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.70.0" +channel = "1.76.0" components = ["rustc", "cargo", 
"rust-std", "rust-docs", "rls", "rust-analysis"] diff --git a/wasm/tx_template/Cargo.toml b/wasm/tx_template/Cargo.toml index 6af11f6225..55d6b7932f 100644 --- a/wasm/tx_template/Cargo.toml +++ b/wasm/tx_template/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "tx_template" resolver = "2" -version = "0.31.6" +version = "0.31.7" [lib] crate-type = ["cdylib"] diff --git a/wasm/vp_template/Cargo.toml b/wasm/vp_template/Cargo.toml index 7d575f93ea..e876a8ee51 100644 --- a/wasm/vp_template/Cargo.toml +++ b/wasm/vp_template/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "vp_template" resolver = "2" -version = "0.31.6" +version = "0.31.7" [lib] crate-type = ["cdylib"] diff --git a/wasm/wasm_source/Cargo.toml b/wasm/wasm_source/Cargo.toml index 0fc079812e..c48e756af9 100644 --- a/wasm/wasm_source/Cargo.toml +++ b/wasm/wasm_source/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_wasm" resolver = "2" -version = "0.31.6" +version = "0.31.7" [lib] crate-type = ["cdylib"] diff --git a/wasm/wasm_source/src/tx_bond.rs b/wasm/wasm_source/src/tx_bond.rs index 94c2339d49..b4b182034d 100644 --- a/wasm/wasm_source/src/tx_bond.rs +++ b/wasm/wasm_source/src/tx_bond.rs @@ -18,16 +18,18 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { #[cfg(test)] mod tests { + use std::cell::RefCell; use std::collections::BTreeSet; + use namada::core::dec::Dec; + use namada::ledger::gas::VpGasMeter; use namada::ledger::pos::{OwnedPosParams, PosVP}; use namada::proof_of_stake::storage::{ bond_handle, read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_stake, }; use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; - use namada::types::dec::Dec; - use namada::types::storage::Epoch; + use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -40,7 +42,6 @@ mod tests 
{ use namada_tx_prelude::key::testing::arb_common_keypair; use namada_tx_prelude::key::RefTo; use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; - use namada_tx_prelude::{token, BorshSerializeExt}; use proptest::prelude::*; use super::*; @@ -107,7 +108,7 @@ mod tests { // Ensure that the bond's source has enough tokens for the bond let target = bond.source.as_ref().unwrap_or(&bond.validator); - let native_token = tx_env.wl_storage.storage.native_token.clone(); + let native_token = tx_env.state.in_mem().native_token.clone(); tx_env.credit_tokens(target, &native_token, bond.amount); native_token }); @@ -327,8 +328,12 @@ mod tests { // Use the tx_env to run PoS VP let tx_env = tx_host_env::take(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &tx_env.gas_meter.borrow(), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(PosVP::new); + let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); let result = result.expect("Validation of valid changes must not fail!"); assert!( diff --git a/wasm/wasm_source/src/tx_bridge_pool.rs b/wasm/wasm_source/src/tx_bridge_pool.rs index 47f401f7d9..8d4277a22e 100644 --- a/wasm/wasm_source/src/tx_bridge_pool.rs +++ b/wasm/wasm_source/src/tx_bridge_pool.rs @@ -42,7 +42,7 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { token::undenominated_transfer( ctx, sender, - ð_bridge::ADDRESS, + &address::ETH_BRIDGE, &nam_addr, amount, )?; diff --git a/wasm/wasm_source/src/tx_change_validator_commission.rs b/wasm/wasm_source/src/tx_change_validator_commission.rs index ce95c6d780..039296e540 100644 --- a/wasm/wasm_source/src/tx_change_validator_commission.rs +++ b/wasm/wasm_source/src/tx_change_validator_commission.rs @@ -20,13 +20,15 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { #[cfg(test)] mod tests { + use std::cell::RefCell; use std::cmp; + use 
namada::core::dec::{Dec, POS_DECIMAL_PRECISION}; + use namada::ledger::gas::VpGasMeter; use namada::ledger::pos::{OwnedPosParams, PosVP}; use namada::proof_of_stake::storage::validator_commission_rate_handle; use namada::proof_of_stake::types::GenesisValidator; - use namada::types::dec::{Dec, POS_DECIMAL_PRECISION}; - use namada::types::storage::Epoch; + use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -36,7 +38,6 @@ mod tests { use namada_tx_prelude::key::testing::arb_common_keypair; use namada_tx_prelude::key::RefTo; use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; - use namada_tx_prelude::{token, BorshSerializeExt}; use proptest::prelude::*; use super::*; @@ -152,8 +153,12 @@ mod tests { // Use the tx_env to run PoS VP let tx_env = tx_host_env::take(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &tx_env.gas_meter.borrow(), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(PosVP::new); + let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); let result = result.expect("Validation of valid changes must not fail!"); assert!( diff --git a/wasm/wasm_source/src/tx_redelegate.rs b/wasm/wasm_source/src/tx_redelegate.rs index c4f9e240bb..8aeed5612c 100644 --- a/wasm/wasm_source/src/tx_redelegate.rs +++ b/wasm/wasm_source/src/tx_redelegate.rs @@ -22,16 +22,18 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { #[cfg(test)] mod tests { + use std::cell::RefCell; use std::collections::BTreeSet; + use namada::core::dec::Dec; + use namada::ledger::gas::VpGasMeter; use namada::ledger::pos::{OwnedPosParams, PosVP}; use namada::proof_of_stake::storage::{ bond_handle, read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_stake, unbond_handle, }; use 
namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; - use namada::types::dec::Dec; - use namada::types::storage::Epoch; + use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -41,7 +43,6 @@ mod tests { use namada_tx_prelude::key::testing::arb_common_keypair; use namada_tx_prelude::key::RefTo; use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; - use namada_tx_prelude::{token, BorshSerializeExt}; use proptest::prelude::*; use super::*; @@ -116,7 +117,7 @@ mod tests { init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let native_token = tx_host_env::with(|tx_env| { - let native_token = tx_env.wl_storage.storage.native_token.clone(); + let native_token = tx_env.state.in_mem().native_token.clone(); let owner = &redelegation.owner; tx_env.spawn_accounts([owner]); @@ -362,8 +363,12 @@ mod tests { // Use the tx_env to run PoS VP let tx_env = tx_host_env::take(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &tx_env.gas_meter.borrow(), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(PosVP::new); + let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); let result = result.expect("Validation of valid changes must not fail!"); assert!( diff --git a/wasm/wasm_source/src/tx_unbond.rs b/wasm/wasm_source/src/tx_unbond.rs index 5d982c11bb..b1662649d1 100644 --- a/wasm/wasm_source/src/tx_unbond.rs +++ b/wasm/wasm_source/src/tx_unbond.rs @@ -25,16 +25,18 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { #[cfg(test)] mod tests { + use std::cell::RefCell; use std::collections::BTreeSet; + use namada::core::dec::Dec; + use namada::ledger::gas::VpGasMeter; use namada::ledger::pos::{OwnedPosParams, PosVP}; use namada::proof_of_stake::storage::{ bond_handle, 
read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_stake, unbond_handle, }; use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; - use namada::types::dec::Dec; - use namada::types::storage::Epoch; + use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -44,7 +46,6 @@ mod tests { use namada_tx_prelude::key::testing::arb_common_keypair; use namada_tx_prelude::key::RefTo; use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; - use namada_tx_prelude::{token, BorshSerializeExt}; use proptest::prelude::*; use super::*; @@ -114,7 +115,7 @@ mod tests { init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let native_token = tx_host_env::with(|tx_env| { - let native_token = tx_env.wl_storage.storage.native_token.clone(); + let native_token = tx_env.state.in_mem().native_token.clone(); if is_delegation { let source = unbond.source.as_ref().unwrap(); tx_env.spawn_accounts([source]); @@ -340,8 +341,12 @@ mod tests { // Use the tx_env to run PoS VP let tx_env = tx_host_env::take(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &tx_env.gas_meter.borrow(), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(PosVP::new); + let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); let result = result.expect("Validation of valid changes must not fail!"); assert!( diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/wasm_source/src/tx_withdraw.rs index 260da26c11..824ceafc1d 100644 --- a/wasm/wasm_source/src/tx_withdraw.rs +++ b/wasm/wasm_source/src/tx_withdraw.rs @@ -23,11 +23,14 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { #[cfg(test)] mod tests { + use std::cell::RefCell; + + use namada::core::dec::Dec; + use namada::ledger::gas::VpGasMeter; use 
namada::ledger::pos::{OwnedPosParams, PosVP}; use namada::proof_of_stake::storage::unbond_handle; use namada::proof_of_stake::types::GenesisValidator; - use namada::types::dec::Dec; - use namada::types::storage::Epoch; + use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -40,7 +43,6 @@ mod tests { use namada_tx_prelude::key::testing::arb_common_keypair; use namada_tx_prelude::key::RefTo; use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; - use namada_tx_prelude::BorshSerializeExt; use proptest::prelude::*; use super::*; @@ -113,7 +115,7 @@ mod tests { init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let native_token = tx_host_env::with(|tx_env| { - let native_token = tx_env.wl_storage.storage.native_token.clone(); + let native_token = tx_env.state.in_mem().native_token.clone(); if is_delegation { let source = withdraw.source.as_ref().unwrap(); tx_env.spawn_accounts([source]); @@ -154,8 +156,8 @@ mod tests { + pos_params.unbonding_len + pos_params.cubic_slashing_window_length) { - env.wl_storage.storage.block.epoch = - env.wl_storage.storage.block.epoch.next(); + env.state.in_mem_mut().block.epoch = + env.state.in_mem().block.epoch.next(); } }); let bond_epoch = if is_delegation { @@ -170,7 +172,7 @@ mod tests { ); assert_eq!( - tx_host_env::with(|env| env.wl_storage.storage.block.epoch), + tx_host_env::with(|env| env.state.in_mem().block.epoch), Epoch( pos_params.pipeline_len + pos_params.unbonding_len @@ -224,8 +226,12 @@ mod tests { // Use the tx_env to run PoS VP let tx_env = tx_host_env::take(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &tx_env.gas_meter.borrow(), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(PosVP::new); + let result = vp_env.validate_tx(&gas_meter, &sentinel, 
PosVP::new); let result = result.expect("Validation of valid changes must not fail!"); assert!( diff --git a/wasm/wasm_source/src/vp_implicit.rs b/wasm/wasm_source/src/vp_implicit.rs index 1ff673d233..443fc4e20c 100644 --- a/wasm/wasm_source/src/vp_implicit.rs +++ b/wasm/wasm_source/src/vp_implicit.rs @@ -287,11 +287,11 @@ fn validate_pos_changes( #[cfg(test)] mod tests { // Use this as `#[test]` annotation to enable logging + use namada::core::dec::Dec; + use namada::core::storage::Epoch; use namada::ledger::pos::{GenesisValidator, PosParams}; use namada::tx::data::TxType; use namada::tx::{Code, Data, Signature}; - use namada::types::dec::Dec; - use namada::types::storage::Epoch; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::tx::{self, tx_host_env, TestTxEnv}; @@ -433,13 +433,13 @@ mod tests { let public_key = secret_key.ref_to(); let vp_owner: Address = (&public_key).into(); let source = address::testing::established_address_2(); - let token = address::nam(); + let token = address::testing::nam(); let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); // Spawn the accounts to be able to modify their storage tx_env.spawn_accounts([&vp_owner, &source, &token]); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -517,7 +517,7 @@ mod tests { let public_key = secret_key.ref_to(); let vp_owner: Address = (&public_key).into(); let target = address::testing::established_address_2(); - let token = address::nam(); + let token = address::testing::nam(); let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); let bond_amount = token::Amount::from_uint(5_098_123, 0).unwrap(); let unbond_amount = token::Amount::from_uint(3_098_123, 0).unwrap(); @@ -531,7 +531,7 @@ mod tests { tx_env.credit_tokens(&vp_owner, &token, amount); // write the denomination of NAM into storage token::write_denom( - &mut 
tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -601,7 +601,7 @@ mod tests { let public_key = secret_key.ref_to(); let vp_owner: Address = (&public_key).into(); let target = address::testing::established_address_2(); - let token = address::nam(); + let token = address::testing::nam(); let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); let bond_amount = token::Amount::from_uint(5_098_123, 0).unwrap(); let unbond_amount = token::Amount::from_uint(3_098_123, 0).unwrap(); @@ -615,7 +615,7 @@ mod tests { tx_env.credit_tokens(&vp_owner, &token, amount); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -666,7 +666,7 @@ mod tests { let public_key = secret_key.ref_to(); let vp_owner: Address = (&public_key).into(); let target = address::testing::established_address_2(); - let token = address::nam(); + let token = address::testing::nam(); let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); // Spawn the accounts to be able to modify their storage @@ -678,7 +678,7 @@ mod tests { tx_env.credit_tokens(&vp_owner, &token, amount); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -725,7 +725,7 @@ mod tests { let public_key = secret_key.ref_to(); let vp_owner: Address = (&public_key).into(); let target = address::testing::established_address_2(); - let token = address::nam(); + let token = address::testing::nam(); let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); tx_env.init_parameters(None, None, None, None); @@ -739,7 +739,7 @@ mod tests { tx_env.credit_tokens(&vp_owner, &token, amount); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -798,7 +798,7 @@ 
mod tests { let vp_owner: Address = (&public_key).into(); let source = address::testing::established_address_2(); let target = address::testing::established_address_3(); - let token = address::nam(); + let token = address::testing::nam(); let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); // Spawn the accounts to be able to modify their storage @@ -810,7 +810,7 @@ mod tests { tx_env.credit_tokens(&source, &token, amount); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) diff --git a/wasm/wasm_source/src/vp_user.rs b/wasm/wasm_source/src/vp_user.rs index 71c7493375..9ac39b9695 100644 --- a/wasm/wasm_source/src/vp_user.rs +++ b/wasm/wasm_source/src/vp_user.rs @@ -332,11 +332,11 @@ fn validate_pos_changes( #[cfg(test)] mod tests { use address::testing::arb_non_internal_address; + use namada::core::dec::Dec; + use namada::core::storage::Epoch; use namada::ledger::pos::{GenesisValidator, PosParams}; use namada::tx::data::{self, TxType}; use namada::tx::{Code, Data, Signature}; - use namada::types::dec::Dec; - use namada::types::storage::Epoch; use namada_test_utils::TestWasms; // Use this as `#[test]` annotation to enable logging use namada_tests::log::test; @@ -377,7 +377,7 @@ mod tests { let vp_owner = address::testing::established_address_1(); let source = address::testing::established_address_2(); - let token = address::nam(); + let token = address::testing::nam(); let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); // Spawn the accounts to be able to modify their storage @@ -388,7 +388,7 @@ mod tests { tx_env.credit_tokens(&source, &token, amount); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -432,14 +432,14 @@ mod tests { let vp_owner = address::testing::established_address_1(); let target = 
address::testing::established_address_2(); - let token = address::nam(); + let token = address::testing::nam(); let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); // Spawn the accounts to be able to modify their storage tx_env.spawn_accounts([&vp_owner, &target, &token]); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -489,7 +489,7 @@ mod tests { let keypair = key::testing::keypair_1(); let public_key = keypair.ref_to(); let target = address::testing::established_address_2(); - let token = address::nam(); + let token = address::testing::nam(); let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); // Spawn the accounts to be able to modify their storage @@ -501,7 +501,7 @@ mod tests { tx_env.credit_tokens(&vp_owner, &token, amount); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -584,7 +584,7 @@ mod tests { let public_key = secret_key.ref_to(); let vp_owner: Address = address::testing::established_address_2(); let target = address::testing::established_address_3(); - let token = address::nam(); + let token = address::testing::nam(); let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); let bond_amount = token::Amount::from_uint(5_098_123, 0).unwrap(); let unbond_amount = token::Amount::from_uint(3_098_123, 0).unwrap(); @@ -594,7 +594,7 @@ mod tests { tx_env.init_account_storage(&vp_owner, vec![public_key], 1); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -748,7 +748,7 @@ mod tests { let secret_key = key::testing::keypair_1(); let public_key = secret_key.ref_to(); let target = address::testing::established_address_3(); - let token = address::nam(); + let token = 
address::testing::nam(); let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); let bond_amount = token::Amount::from_uint(5_098_123, 0).unwrap(); let unbond_amount = token::Amount::from_uint(3_098_123, 0).unwrap(); @@ -758,7 +758,7 @@ mod tests { tx_env.init_account_storage(&validator, vec![public_key], 1); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -844,7 +844,7 @@ mod tests { let public_key = secret_key.ref_to(); let vp_owner: Address = address::testing::established_address_2(); let target = address::testing::established_address_3(); - let token = address::nam(); + let token = address::testing::nam(); let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); let bond_amount = token::Amount::from_uint(5_098_123, 0).unwrap(); let unbond_amount = token::Amount::from_uint(3_098_123, 0).unwrap(); @@ -855,7 +855,7 @@ mod tests { // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -1033,7 +1033,7 @@ mod tests { let secret_key = key::testing::keypair_1(); let public_key = secret_key.ref_to(); let target = address::testing::established_address_3(); - let token = address::nam(); + let token = address::testing::nam(); let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); let bond_amount = token::Amount::from_uint(5_098_123, 0).unwrap(); let unbond_amount = token::Amount::from_uint(3_098_123, 0).unwrap(); @@ -1044,7 +1044,7 @@ mod tests { // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -1109,7 +1109,7 @@ mod tests { let vp_owner = address::testing::established_address_1(); let source = address::testing::established_address_2(); let target = address::testing::established_address_3(); - let token = 
address::nam(); + let token = address::testing::nam(); let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); // Spawn the accounts to be able to modify their storage diff --git a/wasm_for_tests/tx_fail.wasm b/wasm_for_tests/tx_fail.wasm index de739573fa..31be082977 100755 Binary files a/wasm_for_tests/tx_fail.wasm and b/wasm_for_tests/tx_fail.wasm differ diff --git a/wasm_for_tests/tx_memory_limit.wasm b/wasm_for_tests/tx_memory_limit.wasm index bfe218abc8..ed18d2a84a 100755 Binary files a/wasm_for_tests/tx_memory_limit.wasm and b/wasm_for_tests/tx_memory_limit.wasm differ diff --git a/wasm_for_tests/tx_mint_tokens.wasm b/wasm_for_tests/tx_mint_tokens.wasm index a835370612..03736d7e45 100755 Binary files a/wasm_for_tests/tx_mint_tokens.wasm and b/wasm_for_tests/tx_mint_tokens.wasm differ diff --git a/wasm_for_tests/tx_no_op.wasm b/wasm_for_tests/tx_no_op.wasm index ac151c0b72..1b78bd7666 100755 Binary files a/wasm_for_tests/tx_no_op.wasm and b/wasm_for_tests/tx_no_op.wasm differ diff --git a/wasm_for_tests/tx_proposal_code.wasm b/wasm_for_tests/tx_proposal_code.wasm index cb21eacc54..8e2320e859 100755 Binary files a/wasm_for_tests/tx_proposal_code.wasm and b/wasm_for_tests/tx_proposal_code.wasm differ diff --git a/wasm_for_tests/tx_proposal_masp_reward.wasm b/wasm_for_tests/tx_proposal_masp_reward.wasm index 8260a4635d..1e792cdf3a 100755 Binary files a/wasm_for_tests/tx_proposal_masp_reward.wasm and b/wasm_for_tests/tx_proposal_masp_reward.wasm differ diff --git a/wasm_for_tests/tx_read_storage_key.wasm b/wasm_for_tests/tx_read_storage_key.wasm index 2cbb8b9080..e8f7ea77bd 100755 Binary files a/wasm_for_tests/tx_read_storage_key.wasm and b/wasm_for_tests/tx_read_storage_key.wasm differ diff --git a/wasm_for_tests/tx_write.wasm b/wasm_for_tests/tx_write.wasm index 7a3fcd5413..959ef7735a 100755 Binary files a/wasm_for_tests/tx_write.wasm and b/wasm_for_tests/tx_write.wasm differ diff --git a/wasm_for_tests/tx_write_storage_key.wasm 
b/wasm_for_tests/tx_write_storage_key.wasm index aa036451e1..5822b157de 100755 Binary files a/wasm_for_tests/tx_write_storage_key.wasm and b/wasm_for_tests/tx_write_storage_key.wasm differ diff --git a/wasm_for_tests/vp_always_false.wasm b/wasm_for_tests/vp_always_false.wasm index bdd5d4b389..0170a7426c 100755 Binary files a/wasm_for_tests/vp_always_false.wasm and b/wasm_for_tests/vp_always_false.wasm differ diff --git a/wasm_for_tests/vp_always_true.wasm b/wasm_for_tests/vp_always_true.wasm index 1a3f636423..733bbe279f 100755 Binary files a/wasm_for_tests/vp_always_true.wasm and b/wasm_for_tests/vp_always_true.wasm differ diff --git a/wasm_for_tests/vp_eval.wasm b/wasm_for_tests/vp_eval.wasm index 02825f64b1..7f68778430 100755 Binary files a/wasm_for_tests/vp_eval.wasm and b/wasm_for_tests/vp_eval.wasm differ diff --git a/wasm_for_tests/vp_memory_limit.wasm b/wasm_for_tests/vp_memory_limit.wasm index bbe7e732d3..c8e406d59a 100755 Binary files a/wasm_for_tests/vp_memory_limit.wasm and b/wasm_for_tests/vp_memory_limit.wasm differ diff --git a/wasm_for_tests/vp_read_storage_key.wasm b/wasm_for_tests/vp_read_storage_key.wasm index f6f3e18fcd..31f0f23258 100755 Binary files a/wasm_for_tests/vp_read_storage_key.wasm and b/wasm_for_tests/vp_read_storage_key.wasm differ diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 249cd097f2..15807ecca9 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -50,9 +50,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ "getrandom 0.2.11", "once_cell", @@ -3134,7 +3134,7 @@ dependencies = [ [[package]] name = "masp_note_encryption" version = "1.0.0" -source = 
"git+https://github.com/anoma/masp?tag=v1.1.0#f24691c0eb76909e3c15ae03aef294dccebd2df3" +source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" dependencies = [ "borsh", "chacha20", @@ -3147,7 +3147,7 @@ dependencies = [ [[package]] name = "masp_primitives" version = "1.0.0" -source = "git+https://github.com/anoma/masp?tag=v1.1.0#f24691c0eb76909e3c15ae03aef294dccebd2df3" +source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" dependencies = [ "aes", "bip0039", @@ -3179,7 +3179,7 @@ dependencies = [ [[package]] name = "masp_proofs" version = "1.0.0" -source = "git+https://github.com/anoma/masp?tag=v1.1.0#f24691c0eb76909e3c15ae03aef294dccebd2df3" +source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" dependencies = [ "bellman", "blake2b_simd", @@ -3316,7 +3316,7 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "namada" -version = "0.31.6" +version = "0.31.7" dependencies = [ "async-trait", "bimap", @@ -3343,6 +3343,7 @@ dependencies = [ "namada_ibc", "namada_parameters", "namada_proof_of_stake", + "namada_replay_protection", "namada_sdk", "namada_state", "namada_token", @@ -3389,7 +3390,7 @@ dependencies = [ [[package]] name = "namada_account" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_core", @@ -3401,7 +3402,7 @@ dependencies = [ [[package]] name = "namada_core" -version = "0.31.6" +version = "0.31.7" dependencies = [ "bech32 0.8.1", "borsh", @@ -3419,7 +3420,6 @@ dependencies = [ "k256", "masp_primitives", "namada_macros", - "num-derive", "num-integer", "num-rational 0.4.1", "num-traits", @@ -3427,7 +3427,6 @@ dependencies = [ "num_enum", "primitive-types", "proptest", - "prost 0.12.3", "prost-types 0.12.3", "rand 0.8.5", "rand_core 0.6.4", @@ -3447,7 +3446,7 
@@ dependencies = [ [[package]] name = "namada_ethereum_bridge" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "ethabi", @@ -3476,7 +3475,7 @@ dependencies = [ [[package]] name = "namada_gas" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_core", @@ -3486,14 +3485,14 @@ dependencies = [ [[package]] name = "namada_governance" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "itertools 0.10.5", "namada_core", "namada_macros", "namada_parameters", - "namada_state", + "namada_storage", "namada_trans_token", "proptest", "serde", @@ -3504,7 +3503,7 @@ dependencies = [ [[package]] name = "namada_ibc" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "ibc", @@ -3513,11 +3512,12 @@ dependencies = [ "ics23", "masp_primitives", "namada_core", + "namada_gas", "namada_governance", "namada_parameters", "namada_state", "namada_storage", - "namada_trans_token", + "namada_token", "primitive-types", "proptest", "prost 0.12.3", @@ -3528,7 +3528,7 @@ dependencies = [ [[package]] name = "namada_macros" -version = "0.31.6" +version = "0.31.7" dependencies = [ "proc-macro2", "quote", @@ -3537,7 +3537,7 @@ dependencies = [ [[package]] name = "namada_merkle_tree" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "eyre", @@ -3550,7 +3550,7 @@ dependencies = [ [[package]] name = "namada_parameters" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_core", @@ -3561,7 +3561,7 @@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "data-encoding", @@ -3580,9 +3580,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "namada_replay_protection" +version = "0.31.7" +dependencies = [ + "namada_core", +] + [[package]] name = "namada_sdk" -version = "0.31.6" +version = "0.31.7" dependencies = [ "async-trait", "bimap", @@ -3642,20 +3649,21 @@ dependencies = [ [[package]] name = "namada_shielded_token" -version 
= "0.31.6" +version = "0.31.7" dependencies = [ + "borsh", "masp_primitives", "namada_core", "namada_parameters", - "namada_state", "namada_storage", "namada_trans_token", + "serde", "tracing", ] [[package]] name = "namada_state" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "ics23", @@ -3664,6 +3672,7 @@ dependencies = [ "namada_gas", "namada_merkle_tree", "namada_parameters", + "namada_replay_protection", "namada_storage", "namada_trans_token", "namada_tx", @@ -3677,13 +3686,14 @@ dependencies = [ [[package]] name = "namada_storage" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "itertools 0.10.5", "namada_core", "namada_gas", "namada_merkle_tree", + "namada_replay_protection", "namada_tx", "thiserror", "tracing", @@ -3691,7 +3701,7 @@ dependencies = [ [[package]] name = "namada_test_utils" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_core", @@ -3700,7 +3710,7 @@ dependencies = [ [[package]] name = "namada_tests" -version = "0.31.6" +version = "0.31.7" dependencies = [ "async-trait", "chrono", @@ -3734,7 +3744,7 @@ dependencies = [ [[package]] name = "namada_token" -version = "0.31.6" +version = "0.31.7" dependencies = [ "namada_core", "namada_shielded_token", @@ -3744,7 +3754,7 @@ dependencies = [ [[package]] name = "namada_trans_token" -version = "0.31.6" +version = "0.31.7" dependencies = [ "namada_core", "namada_storage", @@ -3752,7 +3762,7 @@ dependencies = [ [[package]] name = "namada_tx" -version = "0.31.6" +version = "0.31.7" dependencies = [ "ark-bls12-381", "borsh", @@ -3774,7 +3784,7 @@ dependencies = [ [[package]] name = "namada_tx_env" -version = "0.31.6" +version = "0.31.7" dependencies = [ "namada_core", "namada_storage", @@ -3782,7 +3792,7 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "masp_primitives", @@ -3804,7 +3814,7 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.31.6" 
+version = "0.31.7" dependencies = [ "borsh", "masp_primitives", @@ -3813,7 +3823,7 @@ dependencies = [ [[package]] name = "namada_vote_ext" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_core", @@ -3823,7 +3833,7 @@ dependencies = [ [[package]] name = "namada_vp_env" -version = "0.31.6" +version = "0.31.7" dependencies = [ "derivative", "masp_primitives", @@ -3835,7 +3845,7 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.31.6" +version = "0.31.7" dependencies = [ "borsh", "namada_account", @@ -3856,7 +3866,7 @@ dependencies = [ [[package]] name = "namada_wasm_for_tests" -version = "0.31.6" +version = "0.31.7" dependencies = [ "getrandom 0.2.11", "namada_test_utils", diff --git a/wasm_for_tests/wasm_source/Cargo.toml b/wasm_for_tests/wasm_source/Cargo.toml index b5d80a65e5..7bea9f247b 100644 --- a/wasm_for_tests/wasm_source/Cargo.toml +++ b/wasm_for_tests/wasm_source/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_wasm_for_tests" resolver = "2" -version = "0.31.6" +version = "0.31.7" [lib] crate-type = ["cdylib"]